|
1 /* |
|
2 * Copyright (c) 2010 Ixonos Plc. |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of the "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - Initial contribution |
|
11 * |
|
12 * Contributors: |
|
13 * Ixonos Plc |
|
14 * |
|
15 * Description: |
|
16 * Implementation for video processor. |
|
17 * |
|
18 */ |
|
19 |
|
20 |
|
21 // Include Files |
|
22 |
|
23 #include "vedcommon.h" |
|
24 #include "movieprocessorimpl.h" |
|
25 #include "statusmonitor.h" |
|
26 #include "activequeue.h" |
|
27 #include "dataprocessor.h" |
|
28 #include "h263dmai.h" // CVedH263Dec |
|
29 #include "mp4parser.h" |
|
30 #include "videoencoder.h" |
|
31 #include "videoprocessor.h" |
|
32 #include "mpeg4timer.h" |
|
33 #include "vedvolreader.h" |
|
34 #include "vedvideosettings.h" |
|
35 #include "vedavcedit.h" |
|
36 |
|
// Local constants
const TUint KInitialDataBufferSize = 8192; // initial frame data buffer size
const TUint KH263StartCodeLength = 3; // H.263 picture start code length
const TUint KMPEG4StartCodeLength = 4; // MPEG4 picture start code length
//const TUint KMaxEncodingDelay = 500000; // time to wait for encoding to complete in microsec.
// Default MPEG-4 vop_time_increment_resolution — presumably clock ticks per
// second used when the input stream does not specify one; confirm against usage.
const TUint KDefaultTimeIncrementResolution = 30000;
// Size in bytes of the buffer allocated for a generated AVC not-coded frame
// (see GenerateNotCodedFrameL)
const TUint KAVCNotCodedFrameBuffer = 128;
const TUint KMaxItemsInProcessingQueue = 3; // note! this must be synchronized with KTRMinNumberOfBuffersCodedPicture setting in transcoder!


// Error code reported on failures: a descriptive decoder error in debug
// builds, plain KErrGeneral in release builds
#ifdef _DEBUG
const TInt KErrorCode = CVideoProcessor::EDecoderFailure;
#else
const TInt KErrorCode = KErrGeneral;
#endif

// An assertion macro wrapper to clean up the code a bit
#define VDASSERT(x, n) __ASSERT_DEBUG(x, User::Panic(_L("CVideoProcessor"), EInternalAssertionFailure+n))

// Debug print macro

#ifdef _DEBUG
#include <e32svr.h>
#define PRINT(x) RDebug::Print x;
#else
#define PRINT(x)
#endif
|
64 |
|
65 // ================= STATIC FUNCTIONS ======================= |
|
66 |
|
67 // --------------------------------------------------------- |
|
68 // AddBits |
|
69 // Static helper function to add bits to byte-buffer |
|
70 // --------------------------------------------------------- |
|
71 // |
|
72 static void AddBits(TUint8* aBuf, TInt& aBitIndex, TInt& aByteIndex, TInt aBits, TInt aNrOfBits) |
|
73 { |
|
74 // aBitIndex = 8 => first bit in the left |
|
75 // aBitIndex = 1 => last bit in the right |
|
76 while ( aBitIndex < aNrOfBits ) |
|
77 { |
|
78 // divide into n bytes |
|
79 aBuf[aByteIndex++] |= TUint8( aBits >> (aNrOfBits-aBitIndex) ); |
|
80 aNrOfBits -= aBitIndex; |
|
81 aBitIndex = 8; |
|
82 } |
|
83 // all bits fit into 1 byte |
|
84 aBitIndex -= aNrOfBits; |
|
85 aBuf[aByteIndex] |= TUint8( aBits << aBitIndex ); |
|
86 if (aBitIndex == 0) |
|
87 { |
|
88 aBitIndex = 8; |
|
89 aByteIndex++; |
|
90 } |
|
91 } |
|
92 |
|
93 |
|
94 |
|
95 // ================= MEMBER FUNCTIONS ======================= |
|
96 |
|
97 |
|
98 |
|
99 // --------------------------------------------------------- |
|
100 // CVideoProcessor::NewL |
|
101 // Symbian two-phased constructor. |
|
102 // --------------------------------------------------------- |
|
103 // |
|
104 |
|
105 CVideoProcessor* CVideoProcessor::NewL(CActiveQueue *anInputQueue, |
|
106 CVideoProcessor::TStreamParameters *aStreamParameters, |
|
107 CMovieProcessorImpl* aProcessor, |
|
108 CStatusMonitor *aStatusMonitor, |
|
109 CVedAVCEdit *aAvcEdit, |
|
110 TBool aThumbnailMode, |
|
111 TInt aPriority) |
|
112 { |
|
113 CVideoProcessor *self = new (ELeave) CVideoProcessor(anInputQueue, |
|
114 aStreamParameters, |
|
115 aProcessor, |
|
116 aStatusMonitor, |
|
117 aAvcEdit, |
|
118 aThumbnailMode, |
|
119 aPriority); |
|
120 |
|
121 CleanupStack::PushL(self); |
|
122 self->ConstructL(); |
|
123 CleanupStack::Pop(); |
|
124 |
|
125 return self; |
|
126 |
|
127 } |
|
128 |
|
129 // --------------------------------------------------------- |
|
130 // CVideoProcessor::CVideoProcessor |
|
131 // Constructor. |
|
132 // --------------------------------------------------------- |
|
133 // |
|
134 CVideoProcessor::CVideoProcessor(CActiveQueue *anInputQueue, |
|
135 TStreamParameters *aStreamParameters, |
|
136 CMovieProcessorImpl* aProcessor, |
|
137 CStatusMonitor *aStatusMonitor, |
|
138 CVedAVCEdit *aAvcEdit, |
|
139 TBool aThumbnailMode, |
|
140 TInt aPriority) : CVideoDecoder(aPriority), |
|
141 iWriteDes(0,0), |
|
142 iThumbnailMode(aThumbnailMode) |
|
143 { |
|
144 |
|
145 // Remember the objects |
|
146 iQueue = anInputQueue; |
|
147 iMonitor = aStatusMonitor; |
|
148 iProcessor = aProcessor; |
|
149 iAvcEdit = aAvcEdit; |
|
150 |
|
151 // Remember the stream parameters |
|
152 iVideoWidth = aStreamParameters->iWidth; |
|
153 iVideoHeight = aStreamParameters->iHeight; |
|
154 |
|
155 // Color Toning |
|
156 iFirstFrameQp = 0; |
|
157 |
|
158 iTiming = aStreamParameters->iTiming; |
|
159 // Reset state |
|
160 iReaderSet = EFalse; |
|
161 iDecoding = EFalse; |
|
162 iStreamEnd = EFalse; |
|
163 iPreviousFrameIncluded = EFalse; |
|
164 iFrameOperation = EDecodeAndWrite; |
|
165 |
|
166 iTrPrevious = -1; |
|
167 |
|
168 iFirstFrameFlag = ETrue; |
|
169 iDecodePending = EFalse; |
|
170 |
|
171 iTranscoderStarted = EFalse; |
|
172 iDecodingSuspended = EFalse; |
|
173 |
|
174 iStartTransitionColor = EColorNone; |
|
175 iEndTransitionColor = EColorNone; |
|
176 iStartNumberOfTransitionFrames = KNumTransitionFrames; |
|
177 iEndNumberOfTransitionFrames = KNumTransitionFrames; |
|
178 iNextTransitionNumber = -1; |
|
179 iProcessingComplete = EFalse; |
|
180 iStreamEndRead = EFalse; |
|
181 |
|
182 iPreviousTimeStamp = TTimeIntervalMicroSeconds(-1); |
|
183 |
|
184 iFirstRead = ETrue; |
|
185 |
|
186 iThumbDecoded = EFalse; |
|
187 |
|
188 iMaxItemsInProcessingQueue = KMaxItemsInProcessingQueue; |
|
189 |
|
190 iDataFormat = EDataUnknown; |
|
191 |
|
192 iLastWrittenFrameNumber = -1; |
|
193 |
|
194 iInitializing = ETrue; |
|
195 } |
|
196 |
|
197 |
|
198 // --------------------------------------------------------- |
|
199 // CVideoProcessor::~CVideoProcessor() |
|
200 // Destructor |
|
201 // --------------------------------------------------------- |
|
202 // |
|
203 CVideoProcessor::~CVideoProcessor() |
|
204 { |
|
205 |
|
206 // If we are decoding, stop |
|
207 if (iDecoding) |
|
208 Stop(); |
|
209 |
|
210 // Remove from being a reader |
|
211 if (iReaderSet) |
|
212 { |
|
213 // Return current block and all |
|
214 // blocks from input queue |
|
215 if (iBlock) |
|
216 { |
|
217 if (iQueue) |
|
218 iQueue->ReturnBlock(iBlock); |
|
219 } |
|
220 |
|
221 if (iQueue) |
|
222 iBlock = iQueue->ReadBlock(); |
|
223 |
|
224 while (iBlock) |
|
225 { |
|
226 if (iQueue) |
|
227 { |
|
228 iQueue->ReturnBlock(iBlock); |
|
229 iBlock = iQueue->ReadBlock(); |
|
230 } |
|
231 } |
|
232 iBlockPos = 0; |
|
233 |
|
234 if (iQueue) |
|
235 iQueue->RemoveReader(); |
|
236 } |
|
237 Cancel(); |
|
238 |
|
239 if (iTransCoder) |
|
240 { |
|
241 if (iTranscoderStarted) |
|
242 { |
|
243 TRAPD(error, iTransCoder->StopL()); |
|
244 if (error != KErrNone) { } |
|
245 } |
|
246 delete iTransCoder; |
|
247 iTransCoder = 0; |
|
248 } |
|
249 |
|
250 iFrameInfoArray.Reset(); |
|
251 |
|
252 // Close the decoder instance if one has been opened |
|
253 if (iDecoder) |
|
254 delete iDecoder; |
|
255 iDecoder = 0; |
|
256 |
|
257 // Deallocate buffers |
|
258 if (iDataBuffer) |
|
259 User::Free(iDataBuffer); |
|
260 |
|
261 if (iOutVideoFrameBuffer) |
|
262 User::Free(iOutVideoFrameBuffer); |
|
263 |
|
264 if (iFrameBuffer) |
|
265 User::Free(iFrameBuffer); |
|
266 |
|
267 if ( iColorTransitionBuffer ) |
|
268 User::Free( iColorTransitionBuffer ); |
|
269 |
|
270 if ( iOrigPreviousYUVBuffer ) |
|
271 User::Free( iOrigPreviousYUVBuffer ); |
|
272 |
|
273 if (iMediaBuffer) |
|
274 delete iMediaBuffer; |
|
275 iMediaBuffer = 0; |
|
276 |
|
277 if (iDecoderSpecificInfo) |
|
278 delete iDecoderSpecificInfo; |
|
279 iDecoderSpecificInfo = 0; |
|
280 |
|
281 if (iOutputVolHeader) |
|
282 delete iOutputVolHeader; |
|
283 iOutputVolHeader = 0; |
|
284 |
|
285 if (iDelayedBuffer) |
|
286 delete iDelayedBuffer; |
|
287 iDelayedBuffer = 0; |
|
288 |
|
289 if (iMPEG4Timer) |
|
290 delete iMPEG4Timer; |
|
291 iMPEG4Timer = 0; |
|
292 |
|
293 if (iTimer) |
|
294 delete iTimer; |
|
295 iTimer = 0; |
|
296 |
|
297 if (iNotCodedFrame) |
|
298 delete iNotCodedFrame; |
|
299 iNotCodedFrame = 0; |
|
300 |
|
301 } |
|
302 |
|
303 |
|
304 // --------------------------------------------------------- |
|
305 // CVideoProcessor::ConstructL() |
|
306 // Symbian 2nd phase constructor can leave. |
|
307 // --------------------------------------------------------- |
|
308 // |
|
309 void CVideoProcessor::ConstructL() |
|
310 { |
|
311 // Set as a reader to the input queue |
|
312 iQueue->SetReader(this, NULL); |
|
313 iReaderSet = ETrue; |
|
314 |
|
315 // Add us to active scheduler |
|
316 CActiveScheduler::Add(this); |
|
317 |
|
318 iMediaBuffer = new (ELeave)CCMRMediaBuffer; |
|
319 |
|
320 // Allocate buffers |
|
321 iDataBuffer = (TUint8*) User::AllocL(KInitialDataBufferSize); |
|
322 iBufferLength = KInitialDataBufferSize; |
|
323 |
|
324 if ( iThumbnailMode ) |
|
325 { |
|
326 TSize a = iProcessor->GetMovieResolution(); |
|
327 TInt length = a.iWidth*a.iHeight; |
|
328 |
|
329 length += (length>>1); |
|
330 iFrameBuffer = (TUint8*)User::AllocL(length); |
|
331 } |
|
332 |
|
333 TSize size(iVideoWidth, iVideoHeight); |
|
334 |
|
335 if (!iThumbnailMode) |
|
336 { |
|
337 // Open a decoder instance |
|
338 iDecoder = CVedH263Dec::NewL(size, 1 /*iReferencePicturesNeeded*/); |
|
339 |
|
340 // create timer |
|
341 iTimer = CCallbackTimer::NewL(*this); |
|
342 } |
|
343 |
|
344 // Make us active |
|
345 SetActive(); |
|
346 iStatus = KRequestPending; |
|
347 } |
|
348 |
|
349 |
|
350 // --------------------------------------------------------- |
|
351 // CVideoProcessor::Start |
|
352 // Starts decoding |
|
353 // (other items were commented in a header). |
|
354 // --------------------------------------------------------- |
|
355 // |
|
356 void CVideoProcessor::Start() |
|
357 { |
|
358 if ( iDecoding ) |
|
359 return; |
|
360 |
|
361 // Activate the object if we have data |
|
362 if ( (!iDecodePending) && (iStatus == KRequestPending) && iQueue->NumDataBlocks() ) |
|
363 { |
|
364 TRequestStatus *status = &iStatus; |
|
365 User::RequestComplete(status, KErrNone); |
|
366 } |
|
367 |
|
368 iDecoding = ETrue; |
|
369 } |
|
370 |
|
371 // --------------------------------------------------------- |
|
372 // CVideoProcessor::Stop |
|
373 // Stops decoding |
|
374 // (other items were commented in a header). |
|
375 // --------------------------------------------------------- |
|
376 // |
|
377 |
|
378 void CVideoProcessor::Stop() |
|
379 { |
|
380 iDecoding = EFalse; |
|
381 |
|
382 if (iTimer) |
|
383 iTimer->CancelTimer(); |
|
384 |
|
385 if (iTranscoderStarted) |
|
386 { |
|
387 TRAPD(error, iTransCoder->StopL()); |
|
388 if (error != KErrNone) { } |
|
389 iTranscoderStarted = EFalse; |
|
390 } |
|
391 } |
|
392 |
|
// ---------------------------------------------------------
// CVideoProcessor::RunL
// Standard active object running method
// (other items were commented in a header).
// Dispatches on the current processing state: handles transcoder
// init completion, decode completion, overall completion, color
// transition frames, and otherwise reads and processes input frames
// until a decode/write becomes pending or input runs out.
// ---------------------------------------------------------
//

void CVideoProcessor::RunL()
{
    PRINT((_L("CVideoProcessor::RunL() in")))

    // Don't decode if we aren't decoding; just re-arm the request
    // so a later Start() can complete it.
    if (!iDecoding)
    {
        if (!IsActive())
        {
            SetActive();
            iStatus = KRequestPending;
        }
        PRINT((_L("CVideoProcessor::RunL() out from !iDecoding branch")))
        return;
    }

    // Completion of an asynchronous transcoder initialization
    if (iTranscoderInitPending)
    {
        iTranscoderInitPending = EFalse;
        if (iStatus != KErrNone)
        {
            // Init failed: report through the monitor (normal mode)
            // or the processor (thumbnail mode)
            if (!iThumbnailMode)
            {
                VDASSERT(iMonitor, 101);
                iMonitor->Error(iStatus.Int());
            }
            else
            {
                iProcessor->NotifyThumbnailReady(iStatus.Int());
            }

            return;
        }
        // at this point we have already read a frame,
        // so now start processing
        iTransCoder->StartL();

        // stop if a fatal error has occurred in starting
        // the transcoder (decoding stopped in MtroFatalError)
        if (!iDecoding)
            return;

        iTranscoderStarted = ETrue;

        if (!iThumbnailMode)
        {
            ProcessFrameL();
            if (iDecodePending)
                return;
        }
        else
        {
            ProcessThumb(ETrue);
            return;
        }
    }

    // Completion of an asynchronous decode
    if (iDecodePending)
    {
        iDecodePending = EFalse;

        if (iThumbnailMode)
        {
            if (iThumbDecoded)
            {
                PRINT((_L("CVideoProcessor::RunL() - thumb decoded")))
                ProcessThumb(EFalse);
            }
            else
            {
                PRINT((_L("CVideoProcessor::RunL() - thumb not decoded")))
                ReadAndWriteThumbFrame();
            }
            return;
        }
    }

    // All frames processed: signal overall completion
    if (iProcessingComplete)
    {
        PRINT((_L("CVideoProcessor::RunL() iProcessingComplete == ETrue")))
        VDASSERT(iMonitor, 102);
        iMonitor->ClipProcessed();
        return;
    }

    // The second frame of a color transition double frame is pending
    if (iFirstColorTransitionFrame)
    {
        Process2ndColorTransitionFrameL();
        return;
    }

    // Main loop: keep reading and processing input frames until an
    // asynchronous operation becomes pending or input runs out
    while (!iDecodePending && !iDelayedWrite && !iTranscoderInitPending &&
           ReadFrame() )
    {
        // process it
        if ( ProcessFrameL() )
        {
            // clip processed up until cut-out time, stop
            if (iFrameInfoArray.Count())
            {
                PRINT((_L("CVideoProcessor::RunL() - stream end reached, wait for frames")));
                iStreamEnd = iStreamEndRead = ETrue;

                // if there are still frames to be encoded, start timer
                // since encoder may skip the rest of the frames
                if ( IsNextFrameBeingEncoded() )
                {
                    PRINT((_L("CVideoProcessor::RunL(), set timer")));
                    if ( !iTimer->IsPending() )
                        iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
                }
                return;
            }

            // No frames left in flight: stop the transcoder and finish
            iTimer->CancelTimer();
            if (iTranscoderStarted)
            {
                iTransCoder->StopL();
                iTranscoderStarted = EFalse;
            }
            VDASSERT(iMonitor, 103);
            iMonitor->ClipProcessed();
            PRINT((_L("CVideoProcessor::RunL() out from ProcessFrameL == ETrue")))
            return;
        }
    }

    if ( !iDecodePending && !iDelayedWrite && !iTranscoderInitPending )
    {

        // We didn't get a frame
        if (iStreamEnd)
        {
            iStreamEndRead = ETrue;
            PRINT((_L("CVideoProcessor::RunL() - stream end reached")));
            if (iFrameInfoArray.Count())
            {
                PRINT((_L("CVideoProcessor::RunL() - stream end reached, wait for frames")));
                // wait until frames have been processed

                // if there are still frames to be encoded, start timer
                // since encoder may skip the rest of the frames
                if ( IsNextFrameBeingEncoded() )
                {
                    PRINT((_L("CVideoProcessor::RunL(), set timer")));
                    if ( !iTimer->IsPending() )
                        iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
                }
                return;
            }
            else
            {
                iTimer->CancelTimer();
                VDASSERT(iMonitor, 104);
                iMonitor->ClipProcessed();
            }
        }
        else
        {
            // Input exhausted but stream not ended: wait for more data
            if (!IsActive())
            {
                SetActive();
                iStatus = KRequestPending;
            }
        }
    }

    PRINT((_L("CVideoProcessor::RunL() out")))

}
|
570 |
|
571 // ----------------------------------------------------------------------------- |
|
572 // CVideoProcessor::RunError |
|
573 // Called by the AO framework when RunL method has leaved |
|
574 // (other items were commented in a header). |
|
575 // ----------------------------------------------------------------------------- |
|
576 // |
|
577 TInt CVideoProcessor::RunError(TInt aError) |
|
578 { |
|
579 |
|
580 if ((aError == CVedH263Dec::EDecoderNoIntra) || (aError == CVedH263Dec::EDecoderCorrupted)) |
|
581 { |
|
582 if (!iThumbnailMode) |
|
583 iMonitor->Error(KErrCorrupt); |
|
584 else |
|
585 iProcessor->NotifyThumbnailReady(KErrCorrupt); |
|
586 } |
|
587 else |
|
588 { |
|
589 if (!iThumbnailMode) |
|
590 iMonitor->Error(aError); |
|
591 else |
|
592 iProcessor->NotifyThumbnailReady(aError); |
|
593 } |
|
594 |
|
595 return KErrNone; |
|
596 } |
|
597 |
|
598 |
|
599 // --------------------------------------------------------- |
|
600 // CVideoProcessor::Process2ndColorTransitionFrameL |
|
601 // Processes the second frame of a color transition double frame |
|
602 // (other items were commented in a header). |
|
603 // --------------------------------------------------------- |
|
604 // |
|
605 TBool CVideoProcessor::Process2ndColorTransitionFrameL() |
|
606 { |
|
607 |
|
608 TFrameInformation frameInfo; |
|
609 frameInfo.iTranscoderMode = EFullWithIM; |
|
610 frameInfo.iFrameNumber = iFrameNumber; |
|
611 frameInfo.iEncodeFrame = ETrue; |
|
612 frameInfo.iKeyFrame = EFalse; |
|
613 frameInfo.iTransitionFrame = ETrue; |
|
614 frameInfo.iTransitionPosition = EPositionStartOfClip; |
|
615 frameInfo.iTransitionColor = iStartTransitionColor; |
|
616 frameInfo.iTransitionFrameNumber = iTransitionFrameNumber; |
|
617 frameInfo.iModificationApplied = EFalse; |
|
618 frameInfo.iRepeatFrame = ETrue; |
|
619 |
|
620 TInt duration; |
|
621 // get timestamp |
|
622 iProcessor->GetNextFrameDuration(duration, frameInfo.iTimeStamp, iTimeStampIndex, iTimeStampOffset); |
|
623 iTimeStampIndex++; |
|
624 |
|
625 frameInfo.iTimeStamp += iCutInTimeStamp; |
|
626 |
|
627 TTimeIntervalMicroSeconds ts = (iProcessor->GetVideoTimeInMsFromTicks(frameInfo.iTimeStamp, EFalse)) * 1000; |
|
628 |
|
629 if (ts <= iPreviousTimeStamp) |
|
630 { |
|
631 // adjust timestamp so that its bigger than ts of previous frame |
|
632 TReal frameRate = iProcessor->GetVideoClipFrameRate(); |
|
633 VDASSERT(frameRate > 0.0, 105); |
|
634 TInt64 durationMs = TInt64( ( 1000.0 / frameRate ) + 0.5 ); |
|
635 durationMs /= 2; // add half the duration of one frame |
|
636 |
|
637 ts = TTimeIntervalMicroSeconds( iPreviousTimeStamp.Int64() + durationMs*1000 ); |
|
638 |
|
639 frameInfo.iTimeStamp = iProcessor->GetVideoTimeInTicksFromMs( ts.Int64()/1000, EFalse ); |
|
640 |
|
641 ts = iProcessor->GetVideoTimeInMsFromTicks(frameInfo.iTimeStamp, EFalse) * 1000; |
|
642 |
|
643 PRINT((_L("CVideoProcessor::Process2ndColorTransitionFrameL() - adjusted timestamp, prev = %d, new = %d"), |
|
644 I64INT( iPreviousTimeStamp.Int64() ) / 1000, I64INT( ts.Int64() ) / 1000)); |
|
645 |
|
646 } |
|
647 |
|
648 |
|
649 iFrameInfoArray.Append(frameInfo); |
|
650 |
|
651 iPreviousTimeStamp = ts; |
|
652 |
|
653 iFirstColorTransitionFrame = EFalse; |
|
654 |
|
655 CCMRMediaBuffer::TBufferType bt = |
|
656 (iDataFormat == EDataH263) ? CCMRMediaBuffer::EVideoH263 : CCMRMediaBuffer::EVideoMPEG4; |
|
657 |
|
658 if (!iNotCodedFrame) |
|
659 GenerateNotCodedFrameL(); |
|
660 |
|
661 PRINT((_L("CVideoProcessor::Process2ndColorTransitionFrameL() - sending not coded"))); |
|
662 |
|
663 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
664 if (iDataFormat == EDataAVC) |
|
665 { |
|
666 TPtr8 ptr(iNotCodedFrame->Des()); |
|
667 |
|
668 TInt length = iNotCodedFrame->Length(); |
|
669 iAvcEdit->ProcessAVCBitStreamL((TDes8&)(ptr), length, 0 /*dummy*/, EFalse ); |
|
670 ptr.SetLength(length); |
|
671 iDataLength = iCurrentFrameLength = length; |
|
672 } |
|
673 #endif |
|
674 |
|
675 iMediaBuffer->Set( TPtrC8(iNotCodedFrame->Des().Ptr(), iNotCodedFrame->Length()), |
|
676 bt, |
|
677 iNotCodedFrame->Length(), |
|
678 EFalse, |
|
679 ts ); |
|
680 |
|
681 iDecodePending = ETrue; |
|
682 if (!IsActive()) |
|
683 { |
|
684 SetActive(); |
|
685 iStatus = KRequestPending; |
|
686 } |
|
687 |
|
688 PRINT((_L("CVideoProcessor::Process2ndColorTransitionFrameL() - WriteCodedBuffer, frame #%d, timestamp %d ms"), |
|
689 iFrameNumber, I64INT( ts.Int64() ) / 1000 )); |
|
690 iTransCoder->WriteCodedBufferL(iMediaBuffer); |
|
691 |
|
692 iFrameNumber++; |
|
693 |
|
694 return ETrue; |
|
695 |
|
696 } |
|
697 |
|
698 |
|
// ---------------------------------------------------------
// CVideoProcessor::GenerateNotCodedFrameL
// Generate bitstream for not coded frame
// (other items were commented in a header).
// Builds a minimal "all macroblocks not coded" frame in the current
// input format (H.263, MPEG-4 or AVC) into iNotCodedFrame, which must
// be NULL on entry. Used to repeat the previous decoded picture.
// ---------------------------------------------------------
//
void CVideoProcessor::GenerateNotCodedFrameL()
    {

    TSize resolution = iProcessor->GetVideoClipResolution();

    if (iDataFormat == EDataH263)
        {
        // H.263 QCIF picture header
        TInt headerSize = 7;
        TUint8 notCodedH263[] = { 0x00, 0x00, 0x80, 0x02, 0x0a, 0x0c, 0x3f };
        // lsbMask[n] keeps the top (8-n) bits set
        TUint8 lsbMask[8] = { 255, 254, 252, 248, 240, 224, 192, 128 };


        if ( resolution == TSize(128,96) )
            notCodedH263[4] = 0x06;    // set source format as sub-QCIF

        else if ( resolution == TSize(352, 288) )
            notCodedH263[4] = 0x0e;    // set source format as CIF

        // only sub-QCIF, QCIF and CIF are supported here
        else if ( resolution != TSize(176,144) )
            User::Panic(_L("CVideoProcessor"), EInternalAssertionFailure);

        TInt numMBs = ( resolution.iWidth / 16 ) * ( resolution.iHeight / 16 );

        // one COD bit for each MB, the last byte of the pic header already contains 6 MB bits
        TInt bitsLeft = numMBs - 6;

        // header plus the remaining COD bits, rounded up to full bytes
        TInt bufSize = headerSize + ( bitsLeft / 8 ) + ( bitsLeft % 8 != 0 );

        VDASSERT(!iNotCodedFrame, 117);
        iNotCodedFrame = (HBufC8*) HBufC8::NewL(bufSize);

        TPtr8 buf(iNotCodedFrame->Des());
        buf.Copy(notCodedH263, headerSize);

        TInt index = headerSize;
        TUint8* ptr = const_cast<TUint8*>(buf.Ptr());

        // set COD bit to 1 for all macroblocks
        while (bitsLeft >= 8)
            {
            ptr[index++] = 0xff;
            bitsLeft -= 8;
            }

        // partial last byte: set only the remaining COD bits
        if (bitsLeft)
            {
            TUint8 val = 0;
            val |= lsbMask[8 - bitsLeft];
            ptr[index] = val;
            }
        buf.SetLength(bufSize);
        }

    else if (iDataFormat == EDataMPEG4)
        {
        VDASSERT(iDataFormat == EDataMPEG4, 115);

        TUint8 vopStartCodeMPEG4[] = { 0x00, 0x00, 0x01, 0xb6 };

        TInt headerSize = 4;

        // calculate the number of bits needed for vop_time_increment
        TInt numTiBits;
        for (numTiBits = 1; ((iInputTimeIncrementResolution - 1) >> numTiBits) != 0; numTiBits++)
            {
            }

        VDASSERT(numTiBits <= 16, 116);

        TInt numMBs = ( resolution.iWidth / 16 ) * ( resolution.iHeight / 16 );

        // get VOP size
        // vop_start_code: 32
        // vop_coding_type + modulo_time_base + marker_bit: 4
        // no. of bits for vop_time_increment
        // marker_bit + vop_coded bit: 2
        // rounding_type: 1
        // intra_dc_vlc_thr: 3
        // vop_quant: 5
        // vop_fcode_forward: 3
        // not_coded for each MB: numMBs
        TInt bufSizeBits = headerSize * 8 + 4 + numTiBits + 2 + 1 + 3 + 5 + 3 + numMBs;//DP mode not included!
        if ( (iInputStreamMode == EVedVideoBitstreamModeMPEG4DP)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4DP_RVLC)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4Resyn_DP)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4Resyn_DP_RVLC)
            )
            {
            // Motion marker in DP mode
            bufSizeBits+=17;
            }
        TInt bufSize = ( bufSizeBits / 8 ) + 1;    // always 1-8 stuffing bits

        VDASSERT(!iNotCodedFrame, 118);
        iNotCodedFrame = (HBufC8*) HBufC8::NewL(bufSize);

        // zero the whole buffer first, then write the start code
        TPtr8 buf(iNotCodedFrame->Des());
        buf.SetLength(bufSize);
        buf.FillZ();
        buf.SetLength(0);
        buf.Copy(vopStartCodeMPEG4, headerSize);

        TUint8* ptr = const_cast<TUint8*>(buf.Ptr());
        TInt shift = 8;          // free bits left in current byte
        TInt index = headerSize; // current byte index
        AddBits(ptr, shift, index, 1, 2);    // vop_coding_type
        AddBits(ptr, shift, index, 0, 1);    // modulo_time_base
        AddBits(ptr, shift, index, 1, 1);    // marker_bit

        // vop_time_increment is left to zero (skip FillZ bits)
        AddBits(ptr, shift, index, 0, numTiBits);

        // marker (1 bit; 1)
        AddBits(ptr, shift, index, 1, 1);
        // vop_coded (1 bit; 1=coded)
        AddBits(ptr, shift, index, 1, 1);

        // vop_rounding_type (1 bit) (0)
        AddBits(ptr, shift, index, 0, 1);

        // intra_dc_vlc_thr (3 bits) (0 = Intra DC, but don't care)
        AddBits(ptr, shift, index, 0, 3);

        // vop_quant (5 bits) (1-31)
        AddBits(ptr, shift, index, 10, 5);

        // vop_fcode_forward (3 bits) (1-7, 0 forbidden)
        AddBits(ptr, shift, index, 1, 3);

        // Macroblocks

        // one COD bit for each MB
        TInt bitsLeft = numMBs;

        // set COD bit to 1 for all macroblocks (== not coded)
        while (bitsLeft >= 8)
            {
            AddBits(ptr, shift, index, 0xff, 8);
            bitsLeft -= 8;
            }

        // lsb[n] keeps the bottom (8-n) bits set
        TUint8 lsb[8] = { 0xff, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x03, 0x01 };
        if (bitsLeft)
            {
            TUint8 val = 0;
            val = lsb[8 - bitsLeft];
            AddBits(ptr, shift, index, val, bitsLeft);
            }
        // If DP mode is used, should add motion marker here: 1 11110000 00000001
        if ( (iInputStreamMode == EVedVideoBitstreamModeMPEG4DP)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4DP_RVLC)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4Resyn_DP)
            || (iInputStreamMode == EVedVideoBitstreamModeMPEG4Resyn_DP_RVLC)
            )
            {
            AddBits(ptr, shift, index, 0x01, 1);
            AddBits(ptr, shift, index, 0xf0, 8);
            AddBits(ptr, shift, index, 0x01, 8);
            }

        // insert stuffing in last byte
        TUint8 stuffing[8] = { 0x00, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f };
        ptr[bufSize - 1] |= stuffing[shift-1];

        buf.SetLength(bufSize);

        }
    else
        {
#ifdef VIDEOEDITORENGINE_AVC_EDITING

        VDASSERT(iDataFormat == EDataAVC, 115);

        VDASSERT(!iNotCodedFrame, 118);
        iNotCodedFrame = (HBufC8*) HBufC8::NewL(KAVCNotCodedFrameBuffer);

        // modifiable descriptor over the full allocated buffer
        TPtr8 buf( const_cast<TUint8*>(iNotCodedFrame->Des().Ptr()),
                   KAVCNotCodedFrameBuffer, KAVCNotCodedFrameBuffer );

        // AVC edit module generates the frame; returns the byte count
        TInt len = iAvcEdit->GenerateNotCodedFrame( buf, iModifiedFrameNumber++ );

        if (len == 0)
            User::Leave(KErrArgument);

        TPtr8 temp(iNotCodedFrame->Des());
        temp.SetLength(len);
#else
        // AVC editing disabled at build time: must not be reached
        VDASSERT(0, 190);
#endif
        }


    }
|
899 |
|
900 // --------------------------------------------------------- |
|
901 // CVideoProcessor::ProcessFrameL |
|
902 // Processes one input frame |
|
903 // (other items were commented in a header). |
|
904 // --------------------------------------------------------- |
|
905 // |
|
906 TBool CVideoProcessor::ProcessFrameL() |
|
907 { |
|
908 |
|
909 |
|
910 PRINT((_L("CVideoProcessor::ProcessFrameL() begin"))); |
|
911 |
|
912 VDASSERT(iCurrentFrameLength,1); |
|
913 |
|
914 TInt frameInRange = 0; |
|
915 TInt frameDuration = 0; |
|
916 TBool keyFrame = EFalse; |
|
917 TTimeIntervalMicroSeconds startCutTime = TTimeIntervalMicroSeconds(0); |
|
918 TTimeIntervalMicroSeconds endCutTime = TTimeIntervalMicroSeconds(0); |
|
919 TTimeIntervalMicroSeconds frameTime = TTimeIntervalMicroSeconds(0); |
|
920 |
|
921 TInt trP = iProcessor->GetTrPrevNew(); |
|
922 TInt trD = iProcessor->GetTrPrevOrig(); |
|
923 |
|
924 // transitions |
|
925 iTransitionFrame = 0; // is this a transition frame? |
|
926 iTransitionPosition = EPositionNone; |
|
927 iTransitionColor = EColorNone; |
|
928 iFirstTransitionFrame = 0; // is this the first transition frame in this instance? |
|
929 TBool endColorTransitionFrame = EFalse; |
|
930 |
|
931 TBool decodeCurrentFrame = 0; // do we need to decode frame for transition effect? |
|
932 |
|
933 // book-keeping |
|
934 startCutTime = iProcessor->GetStartCutTime(); |
|
935 endCutTime = iProcessor->GetEndCutTime(); |
|
936 iRepeatFrame = EFalse; |
|
937 |
|
938 if(iInitializing) |
|
939 { |
|
940 |
|
941 // determine if we need to do full transcoding |
|
942 iFullTranscoding = DetermineResolutionChange() || DetermineFrameRateChange() || |
|
943 DetermineBitRateChange(); |
|
944 |
|
945 // Do full transcoding for MPEG-4 => H.263. MPEG-4 => MPEG-4 and H.263 => MPEG-4 can be done in compressed domain |
|
946 if ( iProcessor->GetCurrentClipVideoType() == EVedVideoTypeMPEG4SimpleProfile && |
|
947 iProcessor->GetOutputVideoType() != EVedVideoTypeMPEG4SimpleProfile ) |
|
948 iFullTranscoding = ETrue; |
|
949 |
|
950 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
951 // Do full transcoding for AVC => H.263/MPEG-4 |
|
952 if ( iProcessor->GetCurrentClipVideoType() == EVedVideoTypeAVCBaselineProfile && |
|
953 iProcessor->GetOutputVideoType() != EVedVideoTypeAVCBaselineProfile ) |
|
954 iFullTranscoding = ETrue; |
|
955 |
|
956 // Do full transcoding for H.263/MPEG-4 => AVC |
|
957 if ( iProcessor->GetCurrentClipVideoType() != EVedVideoTypeAVCBaselineProfile && |
|
958 iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile ) |
|
959 iFullTranscoding = ETrue; |
|
960 |
|
961 // Do color effects for AVC in spatial domain |
|
962 if ( iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile && |
|
963 iProcessor->GetColorEffect() != EVedColorEffectNone ) |
|
964 iFullTranscoding = ETrue; |
|
965 #endif |
|
966 |
|
967 // determine transition parameters for the clip |
|
968 DetermineClipTransitionParameters(iTransitionEffect,iStartOfClipTransition, |
|
969 iEndOfClipTransition,iStartTransitionColor,iEndTransitionColor); |
|
970 |
|
971 if (!iTransCoder) |
|
972 { |
|
973 // initialize transcoder, normal mode |
|
974 CreateAndInitializeTranscoderL(iProcessor->GetCurrentClipVideoType(), CTRTranscoder::EFullTranscoding); |
|
975 return EFalse; |
|
976 } |
|
977 |
|
978 if ( iColorTransitionBuffer ) |
|
979 { |
|
980 User::Free( iColorTransitionBuffer ); |
|
981 iColorTransitionBuffer = 0; |
|
982 } |
|
983 |
|
984 if ( iOrigPreviousYUVBuffer ) |
|
985 { |
|
986 User::Free( iOrigPreviousYUVBuffer ); |
|
987 iOrigPreviousYUVBuffer = 0; |
|
988 } |
|
989 |
|
990 iFirstFrameInRange = 0; |
|
991 iFirstIncludedFrameNumber = -1; |
|
992 iTimeStampIndex = 0; |
|
993 iTimeStampOffset = 0; |
|
994 //iProcessor->iOutputFramesInClip=0; |
|
995 iPreviousFrameIncluded = EFalse; |
|
996 iNumberOfFrames = iProcessor->GetClipNumberOfFrames(); |
|
997 |
|
998 // calculate number of included frames |
|
999 if(iTransitionEffect) |
|
1000 { |
|
1001 GetNumberOfTransitionFrames(startCutTime, endCutTime); |
|
1002 } |
|
1003 iInitializing = EFalse; |
|
1004 } |
|
1005 |
|
1006 TInt startFrame = iProcessor->GetOutputNumberOfFrames() - iNumberOfFrames; |
|
1007 TInt absFrameNumber = startFrame + iFrameNumber; |
|
1008 |
|
1009 VDASSERT(startCutTime <= endCutTime,2); |
|
1010 |
|
1011 // microseconds |
|
1012 frameTime = TTimeIntervalMicroSeconds(iProcessor->GetVideoTimeInMsFromTicks( |
|
1013 iProcessor->VideoFrameTimeStamp(absFrameNumber), EFalse) * 1000); |
|
1014 keyFrame = iProcessor->VideoFrameType(absFrameNumber); |
|
1015 |
|
1016 TInt cur = absFrameNumber; |
|
1017 TInt next = cur+1; |
|
1018 |
|
1019 TTimeIntervalMicroSeconds frameDurationInMicroSec(0); |
|
1020 |
|
1021 // frameDuration is in ticks, with timescale of the current input clip |
|
1022 if(next >= iProcessor->GetOutputNumberOfFrames()) |
|
1023 { |
|
1024 frameDurationInMicroSec = |
|
1025 (iProcessor->GetVideoTimeInMsFromTicks(iProcessor->GetVideoClipDuration(), EFalse) * TInt64(1000)) - frameTime.Int64(); |
|
1026 |
|
1027 frameDuration = I64INT(iProcessor->GetVideoClipDuration() - iProcessor->VideoFrameTimeStamp(cur) ); |
|
1028 } |
|
1029 else |
|
1030 { |
|
1031 frameDuration = I64INT( iProcessor->VideoFrameTimeStamp(next) - iProcessor->VideoFrameTimeStamp(cur) ); |
|
1032 frameDurationInMicroSec = |
|
1033 TTimeIntervalMicroSeconds(iProcessor->GetVideoTimeInMsFromTicks(TInt64(frameDuration), EFalse) * 1000); |
|
1034 } |
|
1035 |
|
1036 TTimeIntervalMicroSeconds frameEndTime = |
|
1037 TTimeIntervalMicroSeconds( frameTime.Int64() + frameDurationInMicroSec.Int64() ); |
|
1038 |
|
1039 // endCutTime is in TTimeIntervalMicroSeconds |
|
1040 |
|
1041 // check if frame is in range for decoding/editing |
|
1042 frameInRange = ((frameEndTime <= endCutTime) ? 1 : 0); |
|
1043 if(frameInRange) |
|
1044 { |
|
1045 |
|
1046 // transition is applied only for frame included in the output movie |
|
1047 if(frameTime >= startCutTime) |
|
1048 { |
|
1049 // find the offset for the first included frame in the clip |
|
1050 if(!iFirstFrameInRange) |
|
1051 { |
|
1052 iFirstFrameInRange = 1; |
|
1053 iFirstIncludedFrameNumber = iFrameNumber; |
|
1054 iModifiedFrameNumber = iFrameNumber + 1; // +1 since number is incremented after modifying |
|
1055 } |
|
1056 TInt relativeIncludedFrameNumber = iFrameNumber - iFirstIncludedFrameNumber; |
|
1057 |
|
1058 if(iTransitionEffect) |
|
1059 { |
|
1060 // check if this is a transition frame & set transition parameters |
|
1061 SetTransitionFrameParams(relativeIncludedFrameNumber, decodeCurrentFrame); |
|
1062 } |
|
1063 } |
|
1064 |
|
1065 // check if this is an end color transition frame |
|
1066 if ( iTransitionFrame && iTransitionPosition == EPositionEndOfClip && |
|
1067 iEndTransitionColor == EColorTransition ) |
|
1068 { |
|
1069 endColorTransitionFrame = ETrue; |
|
1070 iFrameToEncode = EFalse; |
|
1071 } |
|
1072 |
|
1073 // check if we need to include this frame into output movie |
|
1074 if (frameTime < startCutTime) |
|
1075 { |
|
1076 // decode, but do not include in output movie |
|
1077 // iPreviousFrameIncluded = EFalse; |
|
1078 iFrameToEncode = EFalse; |
|
1079 iFrameOperation = EDecodeNoWrite; |
|
1080 // for decoding frames not writable to output movie, do not decode |
|
1081 // with any effects, because all information is need at P->I conversion |
|
1082 } |
|
1083 else // include in output movie |
|
1084 { |
|
1085 |
|
1086 // check if we need to encode it again as I-frame |
|
1087 if (iFullTranscoding || (!iPreviousFrameIncluded && !keyFrame) || iTransitionFrame) |
|
1088 { |
|
1089 // need to decode as P and encode as I |
|
1090 |
|
1091 if (!endColorTransitionFrame) |
|
1092 iFrameToEncode = ETrue; |
|
1093 |
|
1094 iFrameOperation = EDecodeNoWrite; |
|
1095 // for first decoding of P frame in a clip, do not decode with any effects; |
|
1096 // instead, apply the effects in the spatial domain after decoding it as P; |
|
1097 // then feed it to the encoder with the applied special effects |
|
1098 } |
|
1099 else |
|
1100 { |
|
1101 |
|
1102 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1103 // check if we need to encode AVC frames after |
|
1104 // encoded cut frame or starting transition |
|
1105 if (iDataFormat == EDataAVC && iEncodeUntilIDR) |
|
1106 { |
|
1107 TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength); |
|
1108 |
|
1109 if (iAvcEdit->IsNALUnitIDR(ptr)) |
|
1110 iEncodeUntilIDR = 0; |
|
1111 else |
|
1112 { |
|
1113 // encode |
|
1114 iFrameOperation = EDecodeNoWrite; |
|
1115 if (!endColorTransitionFrame) |
|
1116 iFrameToEncode = ETrue; |
|
1117 } |
|
1118 } |
|
1119 #endif |
|
1120 |
|
1121 if (!iEncodeUntilIDR) |
|
1122 { |
|
1123 // just copy the frame data as it is |
|
1124 |
|
1125 TInt colorEffect = TColorEffect2TInt(iProcessor->GetColorEffect()); |
|
1126 |
|
1127 iFrameToEncode = EFalse; |
|
1128 if(decodeCurrentFrame) |
|
1129 iFrameOperation = EDecodeAndWrite; |
|
1130 else |
|
1131 iFrameOperation = (colorEffect==0/*None*/ ? EWriteNoDecode : EDecodeAndWrite); |
|
1132 } |
|
1133 } |
|
1134 iPreviousFrameIncluded = ETrue; |
|
1135 } |
|
1136 |
|
1137 } |
|
1138 else |
|
1139 { |
|
1140 // no need to include frame in output movie |
|
1141 iPreviousFrameIncluded = EFalse; |
|
1142 iFrameToEncode = EFalse; |
|
1143 iFrameOperation = ENoDecodeNoWrite; |
|
1144 |
|
1145 // stop processing |
|
1146 return ETrue; |
|
1147 } |
|
1148 |
|
1149 TBool modeChanged = GetModeChangeL(); // do we need to change the current mode? |
|
1150 |
|
1151 /* added to handle Mp4Specific size problem */ |
|
1152 if(modeChanged && !iFullTranscoding) |
|
1153 { |
|
1154 iProcessor->SetClipModeChanged(modeChanged); //if it is not set, it will be default false |
|
1155 } |
|
1156 |
|
1157 if (iFrameOperation == EDecodeAndWrite) |
|
1158 PRINT((_L("CVideoProcessor::ProcessFrameL() frame operation = EDecodeAndWrite"))); |
|
1159 if (iFrameOperation == EWriteNoDecode) |
|
1160 PRINT((_L("CVideoProcessor::ProcessFrameL() frame operation = EWriteNoDecode"))); |
|
1161 if (iFrameOperation == EDecodeNoWrite) |
|
1162 PRINT((_L("CVideoProcessor::ProcessFrameL() frame operation = EDecodeNoWrite"))); |
|
1163 if (iFrameOperation == ENoDecodeNoWrite) |
|
1164 PRINT((_L("CVideoProcessor::ProcessFrameL() frame operation = ENoDecodeNoWrite"))); |
|
1165 |
|
1166 PRINT((_L("CVideoProcessor::ProcessFrameL() iFrameToEncode = %d"), iFrameToEncode)); |
|
1167 |
|
1168 TBool volHeaderIncluded = EFalse; |
|
1169 |
|
1170 if( (iFrameOperation == EDecodeAndWrite) || (iFrameOperation == EWriteNoDecode) || |
|
1171 ((iFrameOperation == EDecodeNoWrite) && !iFullTranscoding && iFirstFrameFlag) ) |
|
1172 // the last line is to enable processing of the 1st frame also if it would be decoded with transcoder, |
|
1173 // to enable processing of the MPEG-4 VOL header by vedh263d. |
|
1174 { |
|
1175 |
|
1176 TPtr8 ptr(0,0); |
|
1177 TBool doCompressedDomainTC = modeChanged || iProcessor->GetColorEffect() != EVedColorEffectNone; |
|
1178 |
|
1179 // If we need to do compressed domain bitstream manipulation at some |
|
1180 // point of the clip, all frames must be decoded by vedh263d to be |
|
1181 // able to start bitstream modification in the middle of the clip, e.g. |
|
1182 // after a transition. If we are processing MPEG-4, all frames are |
|
1183 // manipulated by the decoder for changing timing information |
|
1184 |
|
1185 if ( doCompressedDomainTC || (iDataFormat == EDataMPEG4 /*&& !iTransitionFrame*/) ) |
|
1186 { |
|
1187 // use h263decoder to do bitstream modification |
|
1188 |
|
1189 // (if this is an end color transition frame, iFrameOperation is |
|
1190 // EDecodeNoWrite && iFrameToEncode == 0 |
|
1191 |
|
1192 TInt frameOp = 1; // EDecodeAndWrite |
|
1193 |
|
1194 if ( iFrameOperation == EDecodeNoWrite ) |
|
1195 frameOp = 2; |
|
1196 |
|
1197 if ( iFrameOperation == EWriteNoDecode && !modeChanged ) |
|
1198 frameOp = 3; // EWriteNoDecode |
|
1199 |
|
1200 TInt frameSize; |
|
1201 |
|
1202 if (iDataFormat == EDataMPEG4 && iFirstFrameFlag) |
|
1203 { |
|
1204 InsertDecoderSpecificInfoL(); |
|
1205 volHeaderIncluded = ETrue; |
|
1206 } |
|
1207 |
|
1208 // use h263decoder to do compressed domain transcoding |
|
1209 PRINT((_L("CVideoProcessor::ProcessFrameL() decode using vedh263d"))); |
|
1210 DecodeFrameL(frameOp, modeChanged, frameSize); |
|
1211 ptr.Set(iOutVideoFrameBuffer, frameSize, frameSize); |
|
1212 } |
|
1213 else |
|
1214 { |
|
1215 // copy bitstream directly |
|
1216 ptr.Set(iDataBuffer, iCurrentFrameLength, iCurrentFrameLength); |
|
1217 } |
|
1218 |
|
1219 if (iFrameOperation == EDecodeAndWrite || iFrameOperation == EWriteNoDecode) |
|
1220 { |
|
1221 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1222 if (iDataFormat == EDataAVC && iTransitionEffect && |
|
1223 iStartTransitionColor == EColorTransition) |
|
1224 { |
|
1225 if (!(iAvcEdit->IsNALUnitIDR(ptr))) |
|
1226 { |
|
1227 // modify frame number |
|
1228 VDASSERT( (iFrameNumber > iFirstIncludedFrameNumber), 182 ); |
|
1229 iAvcEdit->ModifyFrameNumber(ptr, iModifiedFrameNumber++); |
|
1230 PRINT((_L("CVideoProcessor::ProcessFrameL() modified frame no. => #%d"), iModifiedFrameNumber - 1)); |
|
1231 } |
|
1232 else |
|
1233 iModifiedFrameNumber = 1; // this frame is IDR, start numbering from zero |
|
1234 } |
|
1235 #endif |
|
1236 // Write to file |
|
1237 if ( WriteFrameToFileL(ptr, frameDuration, absFrameNumber) ) |
|
1238 return ETrue; |
|
1239 } |
|
1240 |
|
1241 if (iFrameOperation == EWriteNoDecode || |
|
1242 (iFrameOperation == EDecodeAndWrite && !decodeCurrentFrame && doCompressedDomainTC)) |
|
1243 { |
|
1244 // NOTE: The 2nd condition is for B&W & compr.domain TC |
|
1245 |
|
1246 // if we are doing only compressed domain transcoding, theres no need to |
|
1247 // decode the frame using transcoder |
|
1248 |
|
1249 // Throw away the data for this frame: |
|
1250 VDASSERT(iDataLength >= iCurrentFrameLength,4); |
|
1251 Mem::Copy(iDataBuffer, iDataBuffer + iCurrentFrameLength, |
|
1252 iDataLength - iCurrentFrameLength); |
|
1253 iDataLength = iDataLength - iCurrentFrameLength; |
|
1254 iCurrentFrameLength = 0; |
|
1255 |
|
1256 // update and fetch the new Time Code for MPEG4 ES |
|
1257 if (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile && |
|
1258 frameInRange && (frameTime >= startCutTime) ) |
|
1259 |
|
1260 { |
|
1261 iMPEG4Timer->UpdateMPEG4Time(absFrameNumber, iFrameNumber, iProcessor->GetSlowMotionSpeed()); |
|
1262 } |
|
1263 |
|
1264 iFrameNumber++; |
|
1265 return EFalse; |
|
1266 } |
|
1267 } |
|
1268 |
|
1269 // process using transcoder |
|
1270 if (iFrameOperation == EDecodeNoWrite || iFrameOperation == EDecodeAndWrite) |
|
1271 { |
|
1272 WriteFrameToTranscoderL(absFrameNumber, keyFrame, volHeaderIncluded); |
|
1273 } |
|
1274 |
|
1275 // update and fetch the new Time Code for MPEG4 ES |
|
1276 if (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile && |
|
1277 frameInRange && (frameTime >= startCutTime) ) |
|
1278 { |
|
1279 iMPEG4Timer->UpdateMPEG4Time(absFrameNumber, iFrameNumber, iProcessor->GetSlowMotionSpeed()); |
|
1280 } |
|
1281 |
|
1282 iProcessor->SetTrPrevNew(trP); |
|
1283 iProcessor->SetTrPrevOrig(trD); |
|
1284 |
|
1285 if (!iFirstColorTransitionFrame) |
|
1286 iFrameNumber++; |
|
1287 |
|
1288 PRINT((_L("CVideoProcessor::ProcessFrameL() end"))); |
|
1289 |
|
1290 return EFalse; |
|
1291 } |
|
1292 |
|
1293 |
|
1294 // --------------------------------------------------------- |
|
1295 // CVideoProcessor::WriteFrameToFileL |
|
1296 // Write frame to file |
|
1297 // (other items were commented in a header). |
|
1298 // --------------------------------------------------------- |
|
1299 // |
|
1300 TBool CVideoProcessor::WriteFrameToFileL(TPtr8& aBuf, TInt aDurationInTicks, TInt aFrameNumber) |
|
1301 { |
|
1302 |
|
1303 // if there's a frame waiting to be encoded, we must not |
|
1304 // write this frame now. It will be written after all to-be-encoded |
|
1305 // frames have been written. New frames must not be processed |
|
1306 // until the delayed write has been completed |
|
1307 |
|
1308 iDelayedWrite = !IsEncodeQueueEmpty(); |
|
1309 |
|
1310 if (iDelayedWrite) |
|
1311 { |
|
1312 PRINT((_L("CVideoProcessor::WriteFrameToFileL() delayed write"))); |
|
1313 // save frame for later writing |
|
1314 if (iDelayedBuffer) |
|
1315 delete iDelayedBuffer; |
|
1316 iDelayedBuffer = 0; |
|
1317 |
|
1318 iDelayedBuffer = (HBufC8*) HBufC8::NewL(aBuf.Length()); |
|
1319 |
|
1320 TPtr8 db(iDelayedBuffer->Des()); |
|
1321 db.Copy(aBuf); |
|
1322 |
|
1323 iDelayedTimeStamp = iProcessor->VideoFrameTimeStamp(aFrameNumber) + iTimeStampOffset; |
|
1324 iDelayedKeyframe = iProcessor->VideoFrameType(aFrameNumber); |
|
1325 iDelayedFrameNumber = iFrameNumber; |
|
1326 if ( IsNextFrameBeingEncoded() ) |
|
1327 { |
|
1328 // start timer to wait for encoding to complete |
|
1329 if ( !iTimer->IsPending() ) |
|
1330 iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) ); |
|
1331 } |
|
1332 } |
|
1333 else |
|
1334 { |
|
1335 // write now |
|
1336 TInt error = iProcessor->WriteVideoFrameToFile(aBuf, |
|
1337 ( iProcessor->VideoFrameTimeStamp(aFrameNumber) + iTimeStampOffset ), |
|
1338 aDurationInTicks, iProcessor->VideoFrameType(aFrameNumber), EFalse, EFalse, EFalse ); |
|
1339 |
|
1340 // If movie has reached maximum size then stop processing |
|
1341 if (error == KErrCompletion) |
|
1342 { |
|
1343 iFrameInfoArray.Reset(); |
|
1344 return ETrue; |
|
1345 } |
|
1346 |
|
1347 // save frame number |
|
1348 iLastWrittenFrameNumber = iFrameNumber; |
|
1349 |
|
1350 User::LeaveIfError(error); |
|
1351 } |
|
1352 |
|
1353 return EFalse; |
|
1354 |
|
1355 } |
|
1356 |
|
1357 |
|
1358 // --------------------------------------------------------- |
|
1359 // CVideoProcessor::WriteFrameToTranscoderL |
|
1360 // Write frame to transcoder |
|
1361 // (other items were commented in a header). |
|
1362 // --------------------------------------------------------- |
|
1363 // |
|
1364 void CVideoProcessor::WriteFrameToTranscoderL(TInt aFrameNumber, TBool aKeyFrame, TBool aVolHeaderInBuffer) |
|
1365 { |
|
1366 |
|
1367 VDASSERT(iDataFormat != EDataUnknown, 30); |
|
1368 |
|
1369 // TODO: new buffertype for H.264 |
|
1370 CCMRMediaBuffer::TBufferType bt = |
|
1371 (iDataFormat == EDataH263) ? CCMRMediaBuffer::EVideoH263 : CCMRMediaBuffer::EVideoMPEG4; |
|
1372 |
|
1373 // insert dec. specific info header to beginning of buffer if it has not been sent |
|
1374 if (!iDecoderSpecificInfoSent && |
|
1375 ( iDataFormat == EDataAVC || (iDataFormat == EDataMPEG4 && !aVolHeaderInBuffer) ) ) |
|
1376 { |
|
1377 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() - insert header"), iTranscoderMode)); |
|
1378 InsertDecoderSpecificInfoL(); |
|
1379 } |
|
1380 |
|
1381 // determine transcoder mode for this frame |
|
1382 TTranscoderMode mode; |
|
1383 |
|
1384 if (iFrameToEncode) |
|
1385 { |
|
1386 if ( iTransitionFrame || (iProcessor->GetColorEffect() != EVedColorEffectNone) ) |
|
1387 { |
|
1388 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() encode current frame with intermediate modification"))); |
|
1389 mode = EFullWithIM; |
|
1390 } |
|
1391 else |
|
1392 { |
|
1393 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() encode current frame"))); |
|
1394 mode = EFull; |
|
1395 } |
|
1396 } |
|
1397 else |
|
1398 { |
|
1399 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() only decode current frame"))); |
|
1400 mode = EDecodeOnly; |
|
1401 } |
|
1402 |
|
1403 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() iTranscoderMode %d"), iTranscoderMode)); |
|
1404 if (iTranscoderMode != mode) |
|
1405 { |
|
1406 if (mode == EDecodeOnly) |
|
1407 { |
|
1408 iTransCoder->EnableEncoder(EFalse); |
|
1409 iTransCoder->EnablePictureSink(ETrue); |
|
1410 } |
|
1411 else if (mode == EFull) |
|
1412 { |
|
1413 iTransCoder->EnableEncoder(ETrue); |
|
1414 iTransCoder->EnablePictureSink(EFalse); |
|
1415 } |
|
1416 else |
|
1417 { |
|
1418 iTransCoder->EnableEncoder(ETrue); |
|
1419 iTransCoder->EnablePictureSink(ETrue); |
|
1420 } |
|
1421 iTranscoderMode = mode; |
|
1422 } |
|
1423 |
|
1424 if (iFrameToEncode) |
|
1425 { |
|
1426 // should we encode an I-frame ? |
|
1427 if( (iTransitionFrame && iFirstTransitionFrame) || |
|
1428 (iFrameNumber == iFirstIncludedFrameNumber) || |
|
1429 iFirstFrameAfterTransition ) |
|
1430 { |
|
1431 iTransCoder->SetRandomAccessPoint(); |
|
1432 } |
|
1433 } |
|
1434 |
|
1435 TFrameInformation frameInfo; |
|
1436 frameInfo.iTranscoderMode = iTranscoderMode; |
|
1437 frameInfo.iFrameNumber = iFrameNumber; |
|
1438 frameInfo.iEncodeFrame = iFrameToEncode; |
|
1439 frameInfo.iKeyFrame = aKeyFrame; |
|
1440 |
|
1441 // this timestamp is in ticks for writing the frame |
|
1442 frameInfo.iTimeStamp = iProcessor->VideoFrameTimeStamp(aFrameNumber) + iTimeStampOffset; |
|
1443 frameInfo.iTransitionFrame = iTransitionFrame; |
|
1444 frameInfo.iTransitionPosition = iTransitionPosition; |
|
1445 frameInfo.iTransitionColor = iTransitionColor; |
|
1446 frameInfo.iTransitionFrameNumber = iTransitionFrameNumber; |
|
1447 frameInfo.iModificationApplied = EFalse; |
|
1448 |
|
1449 if(iTransitionFrame && iTransitionPosition == EPositionStartOfClip && |
|
1450 iStartTransitionColor == EColorTransition) |
|
1451 { |
|
1452 TInt duration; |
|
1453 TInt64 currentTimeStamp = iProcessor->VideoFrameTimeStamp(aFrameNumber); |
|
1454 |
|
1455 // get timestamp for 1st frame |
|
1456 iProcessor->GetNextFrameDuration(duration, frameInfo.iTimeStamp, iTimeStampIndex, iTimeStampOffset); |
|
1457 iTimeStampIndex++; |
|
1458 |
|
1459 if (iFirstTransitionFrame) |
|
1460 iCutInTimeStamp = currentTimeStamp; |
|
1461 |
|
1462 frameInfo.iTimeStamp += iCutInTimeStamp; |
|
1463 |
|
1464 // the duration parameter is not used actually, so no need to figure it out |
|
1465 iProcessor->AppendNextFrameDuration(duration, currentTimeStamp - iCutInTimeStamp); |
|
1466 |
|
1467 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1468 if (iDataFormat == EDataAVC) |
|
1469 { |
|
1470 |
|
1471 TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength); |
|
1472 if (!iDecoderSpecificInfoSent) |
|
1473 { |
|
1474 ptr.Set(iDataBuffer + iProcessor->GetDecoderSpecificInfoSize(), |
|
1475 iCurrentFrameLength - iProcessor->GetDecoderSpecificInfoSize(), iBufferLength); |
|
1476 } |
|
1477 |
|
1478 // Store PPS id |
|
1479 iAvcEdit->StoreCurrentPPSId( ptr ); |
|
1480 |
|
1481 if (iNotCodedFrame) |
|
1482 delete iNotCodedFrame; |
|
1483 iNotCodedFrame = 0; |
|
1484 } |
|
1485 #endif |
|
1486 |
|
1487 frameInfo.iRepeatFrame = EFalse; |
|
1488 iFirstColorTransitionFrame = ETrue; // to indicate that iDataBuffer must not be flushed |
|
1489 // in MtroReturnCodedBuffer |
|
1490 } |
|
1491 |
|
1492 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1493 if ( iDataFormat == EDataAVC && iTransitionEffect && iStartTransitionColor == EColorTransition && |
|
1494 (iFrameNumber > iFirstIncludedFrameNumber) && iFrameOperation != EDecodeAndWrite) |
|
1495 { |
|
1496 TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength); |
|
1497 |
|
1498 if (!iDecoderSpecificInfoSent) |
|
1499 { |
|
1500 ptr.Set(iDataBuffer + iProcessor->GetDecoderSpecificInfoSize(), |
|
1501 iCurrentFrameLength - iProcessor->GetDecoderSpecificInfoSize(), iBufferLength); |
|
1502 } |
|
1503 |
|
1504 if (!(iAvcEdit->IsNALUnitIDR(ptr))) |
|
1505 { |
|
1506 // modify frame number |
|
1507 iAvcEdit->ModifyFrameNumber(ptr, iModifiedFrameNumber++); |
|
1508 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() modified frame no. => #%d"), iModifiedFrameNumber - 1)); |
|
1509 } |
|
1510 else |
|
1511 iModifiedFrameNumber = 1; // this frame is IDR, start numbering from zero |
|
1512 } |
|
1513 #endif |
|
1514 |
|
1515 // get timestamp in microseconds |
|
1516 TTimeIntervalMicroSeconds ts = |
|
1517 (iProcessor->GetVideoTimeInMsFromTicks(frameInfo.iTimeStamp, EFalse)) * 1000; |
|
1518 |
|
1519 if (ts <= iPreviousTimeStamp) |
|
1520 { |
|
1521 // adjust timestamp so that its bigger than ts of previous frame |
|
1522 TReal frameRate = iProcessor->GetVideoClipFrameRate(); |
|
1523 VDASSERT(frameRate > 0.0, 106); |
|
1524 TInt64 durationMs = TInt64( ( 1000.0 / frameRate ) + 0.5 ); |
|
1525 durationMs /= 2; // add half the duration of one frame |
|
1526 |
|
1527 ts = TTimeIntervalMicroSeconds( iPreviousTimeStamp.Int64() + durationMs*1000 ); |
|
1528 |
|
1529 frameInfo.iTimeStamp = iProcessor->GetVideoTimeInTicksFromMs( ts.Int64()/1000, EFalse ); |
|
1530 |
|
1531 ts = iProcessor->GetVideoTimeInMsFromTicks(frameInfo.iTimeStamp, EFalse) * 1000; |
|
1532 |
|
1533 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() - adjusted timestamp, prev = %d, new = %d"), |
|
1534 I64INT( iPreviousTimeStamp.Int64() ) / 1000, I64INT( ts.Int64() ) / 1000)); |
|
1535 |
|
1536 } |
|
1537 |
|
1538 iFrameInfoArray.Append(frameInfo); |
|
1539 |
|
1540 iPreviousTimeStamp = ts; |
|
1541 |
|
1542 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1543 if (iDataFormat == EDataAVC) |
|
1544 { |
|
1545 TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength); |
|
1546 // This has to be updated when cutting from beginning ?? |
|
1547 iAvcEdit->ProcessAVCBitStreamL((TDes8&)(ptr), (TInt&)(iCurrentFrameLength), |
|
1548 iProcessor->GetDecoderSpecificInfoSize(), !iDecoderSpecificInfoSent ); |
|
1549 iDataLength = iCurrentFrameLength; |
|
1550 |
|
1551 } |
|
1552 #endif |
|
1553 |
|
1554 iMediaBuffer->Set( TPtrC8(iDataBuffer, iBufferLength), |
|
1555 bt, |
|
1556 iCurrentFrameLength, |
|
1557 aKeyFrame, |
|
1558 ts); |
|
1559 |
|
1560 iDecodePending = ETrue; |
|
1561 if (!IsActive()) |
|
1562 { |
|
1563 SetActive(); |
|
1564 iStatus = KRequestPending; |
|
1565 } |
|
1566 |
|
1567 |
|
1568 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() - transcoder mode is %d"), iTranscoderMode)); |
|
1569 |
|
1570 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() - WriteCodedBuffer, frame #%d, timestamp %d ms"), |
|
1571 iFrameNumber, I64INT( ts.Int64() ) / 1000 )); |
|
1572 |
|
1573 PRINT((_L("CVideoProcessor::WriteFrameToTranscoderL() - %d items in queue"), iFrameInfoArray.Count())); |
|
1574 |
|
1575 iTransCoder->WriteCodedBufferL(iMediaBuffer); |
|
1576 |
|
1577 } |
|
1578 |
|
1579 |
|
// ---------------------------------------------------------
// CVideoProcessor::InsertDecoderSpecificInfoL
// Insert AVC dec. config record in the beginning of slice NAL('s)
// For MPEG-4, prepends the VOL header (decoder specific info) to
// the frame data in iDataBuffer instead.
// Grows iDataBuffer as needed; updates iCurrentFrameLength (and
// for MPEG-4 also iDataLength) to include the inserted header.
// @leave System-wide error code if buffer reallocation fails.
// (other items were commented in a header).
// ---------------------------------------------------------
//
void CVideoProcessor::InsertDecoderSpecificInfoL()
    {

    if (iDataFormat == EDataMPEG4)
        {
        if ( (iDataLength + iDecoderSpecificInfo->Length()) > iBufferLength )
            {
            // extend buffer size
            TUint newSize = iDataLength + iDecoderSpecificInfo->Length();
            iDataBuffer = (TUint8*) User::ReAllocL(iDataBuffer, newSize);
            iBufferLength = newSize;
            }
        // shift current frame data forward to make room for the header,
        // then copy the VOL header to the start of the buffer.
        // NOTE(review): the shift moves only iCurrentFrameLength bytes while
        // iDataLength bytes may be buffered; presumably they are equal here,
        // or trailing data is regenerated later — TODO confirm against callers.
        Mem::Copy(iDataBuffer+iDecoderSpecificInfo->Length(), iDataBuffer, iCurrentFrameLength);
        Mem::Copy(iDataBuffer, iDecoderSpecificInfo->Des().Ptr(), iDecoderSpecificInfo->Length());
        iCurrentFrameLength += iDecoderSpecificInfo->Length();
        iDataLength += iDecoderSpecificInfo->Length();

        return;
        }

    VDASSERT( iDataFormat == EDataAVC, 182 );

    // get number of slice NAL's in buffer
    // each NAL unit is preceded by a 4-byte big-endian length field
    TInt frameLen = 0;
    TInt numSliceNalUnits = 0;
    TUint8* frameLenPtr = iDataBuffer;

    while (frameLen < iCurrentFrameLength)
        {
        TInt nalLen = 0;

        nalLen = (frameLenPtr[0] << 24) + (frameLenPtr[1] << 16) +
                 (frameLenPtr[2] << 8) + frameLenPtr[3] + 4;  // +4 for length field

        frameLenPtr += nalLen;
        frameLen += nalLen;
        numSliceNalUnits++;
        }

    // get no. of SPS & PPS
    // parse the AVCDecoderConfigurationRecord held in iDecoderSpecificInfo

    TUint8* ptr = const_cast<TUint8*>(iDecoderSpecificInfo->Des().Ptr());

    TInt index = 4;  // Skip version and length information
    ptr[index] |= 0x03;  // set no. bytes used for length to 4
                         // (modifies the stored DCR in place via const_cast)

    index++;
    TInt numSPS = ptr[index] & 0x1f;  // low 5 bits = numOfSequenceParameterSets

    index++;

    // Loop all SPS units
    for (TInt i = 0; i < numSPS; ++i)
        {
        TInt SPSSize = (ptr[index] << 8) + ptr[index + 1];  // 16-bit big-endian SPS length
        index += 2;
        index += SPSSize;
        }
    TInt numPPS = ptr[index];

    // Align at 32-bit boundrary
    TInt payLoadLen = iCurrentFrameLength + iDecoderSpecificInfo->Length();
    TInt alignmentBytes = (payLoadLen % 4 != 0) * ( 4 - (payLoadLen % 4) );

    // get needed buffer length
    // extra 8 bytes per NAL/SPS/PPS plus 4 presumably reserved for later
    // per-NAL bookkeeping (see ProcessAVCBitStreamL) — TODO confirm
    TInt minBufLen = iCurrentFrameLength + iDecoderSpecificInfo->Length() + alignmentBytes +
                     ( (numSliceNalUnits + numSPS + numPPS) * 8 ) + 4;

    // ReAllocate buffer
    if (iBufferLength < minBufLen)
        {
        iDataBuffer = (TUint8*) User::ReAllocL(iDataBuffer, minBufLen);
        iBufferLength = minBufLen;

        PRINT((_L("CVideoProcessor::XXX() reallocated databuffer, new length = %d"),iBufferLength));
        }

    // move slice NAL's the amount of DCR length
    // (source and destination overlap; assumes Mem::Copy handles
    // overlapping ranges like memmove — per Symbian documentation it does)
    Mem:: Copy(iDataBuffer + iDecoderSpecificInfo->Length(), iDataBuffer, iCurrentFrameLength);

    // copy SPS/PPS data in the beginning
    Mem:: Copy(iDataBuffer, iDecoderSpecificInfo->Des().Ptr(), iDecoderSpecificInfo->Length());

    iCurrentFrameLength += iDecoderSpecificInfo->Length();

    }
|
1672 |
|
1673 // --------------------------------------------------------- |
|
1674 // CVideoProcessor::GetModeChangeL |
|
1675 // Determine need to compr. domain transcoding |
|
1676 // (other items were commented in a header). |
|
1677 // --------------------------------------------------------- |
|
1678 // |
|
1679 TBool CVideoProcessor::GetModeChangeL() |
|
1680 { |
|
1681 |
|
1682 TInt videoClipNumber = iProcessor->GetVideoClipNumber(); |
|
1683 |
|
1684 // iProcessor->GetModeTranslationMPEG4() returns the overall decision for inserted MPEG4 clips |
|
1685 |
|
1686 TVedTranscodeFactor tFact = iProcessor->GetVideoClipTranscodeFactor(videoClipNumber); |
|
1687 |
|
1688 TBool fModeChanged = EFalse; |
|
1689 |
|
1690 if (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile) //MPEG4 |
|
1691 { |
|
1692 |
|
1693 switch (tFact.iStreamType) |
|
1694 { |
|
1695 case EVedVideoBitstreamModeUnknown: |
|
1696 case EVedVideoBitstreamModeMPEG4Regular: |
|
1697 case EVedVideoBitstreamModeMPEG4Resyn: |
|
1698 fModeChanged = EFalse; // already the target mode |
|
1699 break; |
|
1700 case EVedVideoBitstreamModeH263: |
|
1701 fModeChanged = ETrue; |
|
1702 break; |
|
1703 case EVedVideoBitstreamModeMPEG4ShortHeader: |
|
1704 default: // other MPEG4 modes |
|
1705 // if all the MPEG4 (note: it is also considered as MPEG4 type) have the same mode |
|
1706 // no need to do the mode translation |
|
1707 fModeChanged = iProcessor->GetModeTranslationMpeg4() ? ETrue: EFalse; |
|
1708 break; |
|
1709 } |
|
1710 |
|
1711 } |
|
1712 else if ( (iProcessor->GetOutputVideoType() == EVedVideoTypeH263Profile0Level10) || |
|
1713 (iProcessor->GetOutputVideoType() == EVedVideoTypeH263Profile0Level45) ) |
|
1714 { |
|
1715 |
|
1716 if (tFact.iStreamType == EVedVideoBitstreamModeH263 || |
|
1717 tFact.iStreamType == EVedVideoBitstreamModeMPEG4ShortHeader|| |
|
1718 tFact.iStreamType ==EVedVideoBitstreamModeUnknown) |
|
1719 { |
|
1720 fModeChanged = EFalse; |
|
1721 } |
|
1722 else |
|
1723 { |
|
1724 fModeChanged = ETrue; |
|
1725 } |
|
1726 } |
|
1727 |
|
1728 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
1729 else if (iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile) |
|
1730 fModeChanged = EFalse; |
|
1731 #endif |
|
1732 |
|
1733 else // EVedVideoTypeNoVideo |
|
1734 { |
|
1735 User::Leave(KErrNotSupported); |
|
1736 } |
|
1737 |
|
1738 return fModeChanged; |
|
1739 |
|
1740 } |
|
1741 |
|
// ---------------------------------------------------------
// CVideoProcessor::DecodeFrameL
// Decode frame in iDataBuffer using vedh263d
// Fills a vdeDecodeParamters_t from processor state (color effect,
// slow motion, mode translation, MPEG-4 timing) and runs the
// compressed-domain decoder on the current frame, producing the
// (possibly transcoded) frame into iOutVideoFrameBuffer.
//
// @param aOperation        Frame operation code passed to the decoder
//                          (1 = decode and write, 2 = decode no write,
//                          3 = write without decode; see caller).
// @param aModeChanged      ETrue if bitstream mode translation is needed.
// @param aFrameSizeInBytes On return, size of the produced frame data.
// @leave KErrCorrupt if the decoder consumed more data than the frame
//        contains; system-wide error codes on allocation failure.
// (other items were commented in a header).
// ---------------------------------------------------------
//
void CVideoProcessor::DecodeFrameL(TInt aOperation, TBool aModeChanged, TInt& aFrameSizeInBytes)
    {

    // only H.263 and MPEG-4 are handled by this decoder path
    VDASSERT(iDataFormat == EDataH263 || iDataFormat == EDataMPEG4, 136);

    // go into the decoder
    // map the editor-level color effect onto the decoder's enumeration
    TVedColorEffect vedEffect = iProcessor->GetColorEffect();
    CVedH263Dec::TColorEffect effect = (vedEffect == EVedColorEffectNone) ?
        CVedH263Dec::EColorEffectNone : ((vedEffect == EVedColorEffectBlackAndWhite) ? CVedH263Dec::EColorEffectBlackAndWhite : CVedH263Dec::EColorEffectToning);

    vdeDecodeParamters_t decodeParams;

    // Assign the ColorTone value of the ColorTone
    // U,V value for the color toning
    TInt colorToneU;
    TInt colorToneV;
    iProcessor->GetColorTone((TInt&)colorToneU, (TInt&)colorToneV);

    decodeParams.aColorToneU = colorToneU;
    decodeParams.aColorToneV = colorToneV;
    decodeParams.aColorEffect = effect;
    decodeParams.aFrameOperation = aOperation;
    decodeParams.aGetDecodedFrame = EFalse; //getDecodedFrame; // no need to get YUV
    decodeParams.aSMSpeed = iProcessor->GetSlowMotionSpeed();

    // temporal reference bookkeeping carried across frames; the decoder
    // updates these through the pointers and the caller stores them back
    TInt trD = iProcessor->GetTrPrevOrig();
    TInt trP = iProcessor->GetTrPrevNew();

    decodeParams.aTrD = &trD;
    decodeParams.aTrP = &trP;
    decodeParams.aVideoClipNumber = iProcessor->GetVideoClipNumber();

    TVedTranscodeFactor tFact = iProcessor->GetVideoClipTranscodeFactor(decodeParams.aVideoClipNumber);

    decodeParams.streamMode = tFact.iStreamType;
    decodeParams.iTimeIncrementResolution = tFact.iTRes;
    decodeParams.aGetVideoMode = EFalse;
    decodeParams.aOutputVideoFormat = iProcessor->GetOutputVideoType();

    decodeParams.fModeChanged = aModeChanged;
    decodeParams.fHaveDifferentModes = iProcessor->GetModeTranslationMpeg4() ? ETrue: EFalse;
    /* Color Toning */
    decodeParams.aFirstFrameQp = iFirstFrameQp;

    // : Optimisation - If the frame is to be encoded, there is no need
    // to process it using vedh263d in all cases, for example when
    // doing end transition. In start transition case it has to be done
    // so that compressed domain transcoding can continue after transition

    // before decoding, set the time infomation in the decoder parameters
    decodeParams.aMPEG4TimeStamp = iMPEG4Timer->GetMPEG4TimeStampPtr();
    decodeParams.aMPEG4TargetTimeResolution = iMPEG4Timer->GetMPEG4TimeResolutionPtr();

    decodeParams.vosHeaderSize = 0;

    // +3 includes the next PSC or EOS in the bit buffer
    // (3 bytes for H.263 picture start code, 4 for the MPEG-4 start code)
    TPtrC8 inputPtr(iDataBuffer, iCurrentFrameLength + (iDataFormat==EDataH263 ? KH263StartCodeLength : KMPEG4StartCodeLength));

    // check output buffer size & reallocate if its too small
    // (1.5x headroom since compressed-domain transcoding can grow the frame)
    if ( TReal(iOutVideoFrameBufferLength) < TReal(iCurrentFrameLength) * 1.5 )
        {
        TInt newLen = TInt( TReal(iCurrentFrameLength) * 1.5 );

        iOutVideoFrameBuffer = (TUint8*) User::ReAllocL(iOutVideoFrameBuffer, newLen);
        iOutVideoFrameBufferLength = newLen;

        PRINT((_L("CVideoProcessor::DecodeFrameL() reallocated output buffer, new size = %d"),
            iOutVideoFrameBufferLength));
        }

    TPtr8 outputPtr(iOutVideoFrameBuffer, 0, iOutVideoFrameBufferLength);

    TInt frameSize = 0;
    TBool wasFirstFrame = iFirstFrameFlag; // need to save the value since it may be changed inside
    iDecoder->DecodeFrameL(inputPtr, outputPtr, iFirstFrameFlag, frameSize, &decodeParams);

    if (frameSize > (TInt)iCurrentFrameLength)
        {
        // decoder used more data than was in the buffer => corrupted bitstream
        PRINT((_L("CVideoProcessor::DecodeFrameL() decoder used more data than was in the buffer => corrupted bitstream")));
        User::Leave( KErrCorrupt );
        }

    aFrameSizeInBytes = outputPtr.Length();

    /* record first frame QP */
    if ((iFrameNumber==0) && wasFirstFrame)
        {
        iFirstFrameQp = decodeParams.aFirstFrameQp;

        if (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile)
            {
            PRINT((_L("CVideoProcessor::DecodeFrameL() save VOS header, size %d"), decodeParams.vosHeaderSize));
            // sync the vol headers
            // keep a copy of the VOS/VOL header produced by the decoder,
            // growing the holding buffer if the header is larger than before
            if ( decodeParams.vosHeaderSize > iOutputVolHeader->Des().MaxLength() )
                {
                delete iOutputVolHeader;
                iOutputVolHeader = NULL;
                iOutputVolHeader = HBufC8::NewL(decodeParams.vosHeaderSize);
                }
            iOutputVolHeader->Des().Copy( outputPtr.Ptr(), decodeParams.vosHeaderSize );
            }
        }

    PRINT((_L("CVideoProcessor::DecodeFrameL() out")));
    }
|
1854 |
|
1855 // --------------------------------------------------------- |
|
1856 // CVideoProcessor::CreateAndInitializeTranscoderL |
|
1857 // (other items were commented in a header). |
|
1858 // --------------------------------------------------------- |
|
1859 // |
|
// Creates and configures the transcoder for the given input clip type and
// operational mode. Maps the input codec/profile to an input MIME type and
// an output bitstream buffer size, validates the requested output format,
// resolution, frame rate and bit rate (full transcoding mode only), opens
// the transcoder and starts its asynchronous initialization.
// Leaves with KErrNotSupported for unsupported formats/levels/resolutions,
// KErrArgument for a non-standard output resolution.
// NOTE(review): initialization completes asynchronously via
// MtroInitializeComplete(); this method only sets iTranscoderInitPending.
void CVideoProcessor::CreateAndInitializeTranscoderL(TVedVideoType aInputType, CTRTranscoder::TTROperationalMode aMode)
    {

    PRINT((_L("CVideoProcessor::CreateAndInitializeTranscoderL() begin")));

    // Must not be called when a transcoder already exists
    VDASSERT(iTransCoder == 0, 27);

    iTransCoder = CTRTranscoder::NewL(*this);

    TBufC8<256> inputMime;

    // TInt volLength = 0;
    TInt outputBufferSize = 0;
    iInputMPEG4ProfileLevelId = 0;

    if (aInputType == EVedVideoTypeMPEG4SimpleProfile)
        {
        // Get the VOL header from the frame data
        CVedVolReader* reader = CVedVolReader::NewL();
        CleanupStack::PushL(reader);
        // Get pointer to the frame data
        TPtrC8 inputPtr(iDecoderSpecificInfo->Des());
        reader->ParseVolHeaderL((TDesC8&) inputPtr);

        iInputMPEG4ProfileLevelId = reader->ProfileLevelId();

        iInputTimeIncrementResolution = reader->TimeIncrementResolution();

        iInputStreamMode = reader->BitstreamMode();

        // Map MPEG-4 profile-level-id to an input MIME string and size the
        // output buffer to half of the max coded picture size for the level
        switch (iInputMPEG4ProfileLevelId)
            {
            case 8:
                inputMime = _L8("video/mp4v-es; profile-level-id=8");
                outputBufferSize = KMaxCodedPictureSizeMPEG4QCIF / 2;
                break;

            case 9:
                inputMime = _L8("video/mp4v-es; profile-level-id=9");
                outputBufferSize = KMaxCodedPictureSizeMPEG4L0BQCIF / 2;
                break;

            case 1:
                inputMime = _L8("video/mp4v-es; profile-level-id=1");
                outputBufferSize = KMaxCodedPictureSizeMPEG4QCIF / 2;
                break;

            case 2:
                inputMime = _L8("video/mp4v-es; profile-level-id=2");
                outputBufferSize = KMaxCodedPictureSizeMPEG4CIF / 2;
                break;

            case 3:
                inputMime = _L8("video/mp4v-es; profile-level-id=3");
                outputBufferSize = KMaxCodedPictureSizeMPEG4CIF / 2;
                break;

            case 4:
                inputMime = _L8("video/mp4v-es; profile-level-id=4");
                outputBufferSize = KMaxCodedPictureSizeVGA / 2;
                break;

            default:
                // Unknown level: fall back to level 0 (QCIF) settings
                inputMime = _L8("video/mp4v-es; profile-level-id=8");
                outputBufferSize = KMaxCodedPictureSizeMPEG4QCIF / 2;
                break;
            }

        // volLength = reader->HeaderSize();
        CleanupStack::PopAndDestroy(reader);
        }

    else if (aInputType == EVedVideoTypeH263Profile0Level10)
        {
        inputMime = _L8("video/H263-2000; profile=0; level=10");
        outputBufferSize = KMaxCodedPictureSizeQCIF / 2;
        }

    else if (aInputType == EVedVideoTypeH263Profile0Level45)
        {
        inputMime = _L8("video/H263-2000; profile=0; level=45");
        outputBufferSize = KMaxCodedPictureSizeQCIF / 2;
        }

    else if (aInputType == EVedVideoTypeAVCBaselineProfile)
        {
        // get input avc level
        VDASSERT( iAvcEdit != 0, 181 );
        VDASSERT( iDecoderSpecificInfo, 181 );

        TPtr8 info = iDecoderSpecificInfo->Des();
        User::LeaveIfError( iAvcEdit->GetLevel(info, iInputAVCLevel) );

        // Map AVC level to a baseline-profile MIME string (hex
        // profile-level-id) and output buffer size; 101 denotes level 1b
        switch (iInputAVCLevel)
            {
            case 10:
                inputMime = _L8("video/H264; profile-level-id=42800A");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel1 / 2;
                break;

            case 101:
                inputMime = _L8("video/H264; profile-level-id=42900B");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel1B / 2;
                break;

            case 11:
                inputMime = _L8("video/H264; profile-level-id=42800B");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel1_1 / 2;
                break;

            case 12:
                inputMime = _L8("video/H264; profile-level-id=42800C");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel1_2 / 2;
                break;

            // NOTE: Levels 1.3 and 2 are enabled for testing purposes,
            // to be removed
            case 13:
                inputMime = _L8("video/H264; profile-level-id=42800D");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel1_3 / 2;
                break;

            case 20:
                inputMime = _L8("video/H264; profile-level-id=428014");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel2 / 2;
                break;

            //WVGA task
            case 21:
                inputMime = _L8("video/H264; profile-level-id=428015");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel2_1 / 2;
                break;

            case 22:
                inputMime = _L8("video/H264; profile-level-id=428016");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel2_2 / 2;
                break;

            case 30:
                inputMime = _L8("video/H264; profile-level-id=42801E");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel3 / 2;
                break;

            case 31:
                inputMime = _L8("video/H264; profile-level-id=42801F");
                outputBufferSize = KMaxCodedPictureSizeAVCLevel3_1 / 2;
                break;

            default:
                User::Leave(KErrNotSupported);
                break;
            }
        }

    else
        User::Leave(KErrNotSupported);

    if ( !(iTransCoder->SupportsInputVideoFormat(inputMime) ) )
        {
        User::Leave(KErrNotSupported);
        }


    // default framerate is 15 fps
    TReal frameRate = 15.0;

    iOutputBitRate = 64000;

    if ( aMode == CTRTranscoder::EFullTranscoding )
        {
        // get output mime type
        SetOutputVideoCodecL(iProcessor->GetOutputVideoMimeType());

        if ( !(iTransCoder->SupportsOutputVideoFormat(iOutputMimeType) ) )
            {
            User::Leave(KErrNotSupported);
            }

        // check output resolution
        TSize outputResolution = iProcessor->GetMovieResolution();

        if ( (outputResolution.iWidth > iMaxOutputResolution.iWidth) || (outputResolution.iHeight > iMaxOutputResolution.iHeight))
            {
            if ( iArbitrarySizeAllowed ) // This is for future-proofness. Currently the checking of standard sizes below overrules this one
                {
                // Arbitrary sizes: only the total pixel count is limited
                if ( outputResolution.iWidth * outputResolution.iHeight > iMaxOutputResolution.iWidth*iMaxOutputResolution.iHeight )
                    {
                    PRINT((_L("CVideoProcessor::CreateAndInitializeTranscoderL() too high resolution requested")));
                    User::Leave( KErrNotSupported );
                    }
                }
            else
                {
                PRINT((_L("CVideoProcessor::CreateAndInitializeTranscoderL() incompatible or too high resolution requested")));
                User::Leave( KErrNotSupported );
                }
            }

        // check size. For now only standard sizes are allowed
        if ( (outputResolution != KVedResolutionSubQCIF) &&
             (outputResolution != KVedResolutionQCIF) &&
             (outputResolution != KVedResolutionCIF) &&
             (outputResolution != KVedResolutionQVGA) &&
             (outputResolution != KVedResolutionVGA16By9) &&
             (outputResolution != KVedResolutionVGA) &&
             //WVGA task
             (outputResolution != KVedResolutionWVGA) )
            {
            User::Leave( KErrArgument );
            }

        // check output frame rate; clamp the movie frame rate to the
        // maximum supported output frame rate
        TReal movieFrameRate = iProcessor->GetMovieFrameRate();

        if ( movieFrameRate > 0.0 )
            {
            if ( movieFrameRate <= iMaxOutputFrameRate )
                {
                frameRate = TReal32(movieFrameRate);
                }
            else
                {
                frameRate = iMaxOutputFrameRate;
                }
            }

        // check output bitrate: explicit movie bit rate takes precedence
        // over the standard bit rate; both are clamped to the maximum
        TInt movieBitRate = iProcessor->GetMovieVideoBitrate();
        TInt standardBitRate = iProcessor->GetMovieStandardVideoBitrate();

        if ( movieBitRate > 0 )
            {
            if ( movieBitRate <= iMaxOutputBitRate )
                {
                iOutputBitRate = movieBitRate;
                }
            else
                {
                iOutputBitRate = iMaxOutputBitRate;
                }
            }
        else if ( standardBitRate > 0 )
            {
            if ( standardBitRate <= iMaxOutputBitRate )
                {
                iOutputBitRate = standardBitRate;
                }
            else
                {
                iOutputBitRate = iMaxOutputBitRate;
                }
            }
        }
    else
        {
        // decode-only modes do not produce an encoded output format
        iOutputMimeType = KNullDesC8;
        }

    TTRVideoFormat videoInputFormat;
    TTRVideoFormat videoOutputFormat;

    if (!iThumbnailMode)
        {
        videoInputFormat.iSize = iProcessor->GetVideoClipResolution();
        videoOutputFormat.iSize = iProcessor->GetMovieResolution();
        }
    else
        {
        videoInputFormat.iSize = videoOutputFormat.iSize = TSize(iVideoWidth, iVideoHeight);
        }

    videoInputFormat.iDataType = CTRTranscoder::ETRDuCodedPicture;

    // Full transcoding outputs coded pictures; otherwise raw YUV 4:2:0
    if (aMode == CTRTranscoder::EFullTranscoding)
        videoOutputFormat.iDataType = CTRTranscoder::ETRDuCodedPicture;
    else
        videoOutputFormat.iDataType = CTRTranscoder::ETRYuvRawData420;


    iTransCoder->OpenL( this,
                        aMode,
                        inputMime,
                        iOutputMimeType,
                        videoInputFormat,
                        videoOutputFormat,
                        EFalse );


    iTransCoder->SetVideoBitRateL(iOutputBitRate);

    if (!iThumbnailMode)
        {
        // check framerate: target framerate cannot be larger than source framerate
        // (source rate is quantized to the nominal 15 / 30 fps values)
        TReal inputFR = iProcessor->GetVideoClipFrameRate();
        if ( inputFR <= 15.0 )
            {
            inputFR = 15.0;
            }
        else
            {
            inputFR = 30.0;
            }
        if (frameRate > inputFR)
            frameRate = inputFR;
        }

    iTransCoder->SetFrameRateL(frameRate);
    iTransCoder->SetChannelBitErrorRateL(0.0);

    // dummy
    TTRVideoCodingOptions codingOptions;
    codingOptions.iSyncIntervalInPicture = iProcessor->GetSyncIntervalInPicture();
    codingOptions.iMinRandomAccessPeriodInSeconds = (TInt) (1.0 / iProcessor->GetRandomAccessRate());
    codingOptions.iDataPartitioning = EFalse;
    codingOptions.iReversibleVLC = EFalse;
    codingOptions.iHeaderExtension = 0;

    iTransCoder->SetVideoCodingOptionsL(codingOptions);

    TSize targetSize;
    if (!iThumbnailMode)
        targetSize = iProcessor->GetMovieResolution();
    else
        targetSize = TSize(iVideoWidth, iVideoHeight);

    iTransCoder->SetVideoPictureSinkOptionsL(targetSize, this);

    // Start in decode-only mode; encoding is enabled later when needed
    iTransCoder->EnableEncoder(EFalse);
    iTransCoder->EnablePictureSink(ETrue);
    iTranscoderMode = EDecodeOnly;

    // set init. data: MPEG-4/AVC use the stored decoder specific info,
    // H.263 uses the current frame data directly
    TPtrC8 initData;
    if (aInputType == EVedVideoTypeMPEG4SimpleProfile ||
        aInputType == EVedVideoTypeAVCBaselineProfile)
        {
        initData.Set(iDecoderSpecificInfo->Des());
        }
    else
        initData.Set(iDataBuffer, iCurrentFrameLength);

    iDecoderSpecificInfoSent = EFalse;

    iTransCoder->SetDecoderInitDataL( initData );

    if (!iThumbnailMode)
        {
        // allocate output bitstream buffer for processing with vedh263d
        VDASSERT( outputBufferSize != 0, 52 );
        iOutVideoFrameBuffer = (TUint8*) User::AllocL(outputBufferSize);
        iOutVideoFrameBufferLength = outputBufferSize;
        }

    iTranscoderInitPending = ETrue;

    // Wait for MtroInitializeComplete() to complete our request
    if (!IsActive())
        {
        SetActive();
        iStatus = KRequestPending;
        }


    iTransCoder->InitializeL();

    // Get processing time estimate from transcoder, divide it by the framerate to get processing time per frame
    // and then multiply it by 2 to get some safety margin and by unit conversion factor 1000000.
    // The delay is used to determine if a frame was skipped, hence there should be some margin.
#ifdef __WINSCW__
    iMaxEncodingDelay = 5000000; // emulator can be really slow, use 5 seconds timeout
#else
    iMaxEncodingDelay = (TUint)(2*1000000*iTransCoder->EstimateTranscodeTimeFactorL(videoInputFormat,videoOutputFormat)/frameRate);
#endif

    iMaxItemsInProcessingQueue = iTransCoder->GetMaxFramesInProcessing();

    PRINT((_L("CVideoProcessor::CreateAndInitializeTranscoderL() end")));
    }
|
2237 |
|
2238 // --------------------------------------------------------- |
|
2239 // CVideoProcessor::MtroInitializeComplete |
|
2240 // Called by transcoder to indicate init. completion |
|
2241 // (other items were commented in a header). |
|
2242 // --------------------------------------------------------- |
|
2243 // |
|
// Transcoder observer callback: called when the asynchronous transcoder
// initialization (started in CreateAndInitializeTranscoderL) finishes.
// On success, retrieves the encoder's coding-standard-specific init output
// (MPEG-4 VOL header or AVC parameter sets), stores it, and for AVC output
// converts/saves the AVCDecoderConfigurationRecord when the encoder will be
// used. Finally creates the MPEG-4 timing helper, optionally enables
// transcoder pausing, and completes this active object's pending request
// with the resulting error code.
void CVideoProcessor::MtroInitializeComplete(TInt aError)
    {

    TInt error = aError;
    TInt outputTimeIncrementResolution = KDefaultTimeIncrementResolution;

    PRINT((_L("CVideoProcessor::MtroInitializeComplete() error = %d"), aError));

    // Header handling applies only when producing MPEG-4 (or AVC, when
    // AVC editing is compiled in) output and not in thumbnail mode
    if ( !iThumbnailMode && (aError == KErrNone) &&
         ( (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile)
#ifdef VIDEOEDITORENGINE_AVC_EDITING
           || (iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile)
#endif
         ) )
        {
        PRINT((_L("CVideoProcessor::MtroInitializeComplete() calling GetCodingStandardSpecificInitOutputLC")));

        // get & save vol header from encoder
        // (Pop immediately: ownership transfers to iOutputVolHeader)
        TRAP(error,
            {
            iOutputVolHeader = iTransCoder->GetCodingStandardSpecificInitOutputLC();
            CleanupStack::Pop();
            });

        iOutputVolHeaderWritten = EFalse;

        if ( error == KErrNone )
            {

#ifdef VIDEOEDITORENGINE_AVC_EDITING
            if (iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile)
                {
                // Check if we need to use encoder
                // : Are there any other cases where encoder is used _
                if ( iFullTranscoding || iTransitionEffect ||
                     iProcessor->GetStartCutTime() != TTimeIntervalMicroSeconds(0) )
                    {
                    HBufC8* outputAVCHeader = 0;
                    // is the max. size of AVCDecoderConfigurationRecord known here ??
                    TRAP(error, outputAVCHeader = (HBufC8*) HBufC8::NewL(16384));

                    if (error == KErrNone)
                        {
                        TPtr8 ptr = outputAVCHeader->Des();

                        // parse header & convert it to AVCDecoderConfigurationRecord -format
                        TRAP(error, iAvcEdit->ConvertAVCHeaderL(*iOutputVolHeader, ptr));

                        if (error == KErrNone)
                            {
                            TRAP(error, iAvcEdit->SaveAVCDecoderConfigurationRecordL(ptr, ETrue));
                            }
                        }
                    if (outputAVCHeader)
                        delete outputAVCHeader;
                    }

                iEncodeUntilIDR = 0;
                if ( iStartOfClipTransition != 0 ||
                     iProcessor->GetStartCutTime() != TTimeIntervalMicroSeconds(0) )
                    {
                    // we need to use encoder at the beginning, now determine
                    // if we have to encode frames after cut / transition until
                    // input frame is IDR
                    iEncodeUntilIDR = iAvcEdit->EncodeUntilIDR();
                    }
                }
            else
#endif
                {

                VDASSERT(iOutputVolHeader, 49);

                // get time increment resolution using vol reader
                CVedVolReader* reader = NULL;
                TRAP( error, reader = CVedVolReader::NewL() );

                if ( error == KErrNone )
                    {
                    TRAP( error, reader->ParseVolHeaderL( (TDesC8&) *iOutputVolHeader ) );
                    if (error == KErrNone)
                        {
                        outputTimeIncrementResolution = reader->TimeIncrementResolution();
                        }
                    delete reader;
                    }
                }

            }
        }

    if (error == KErrNone)
        {
        // create MPEG-4 timing instance
        TRAP(error, iMPEG4Timer = CMPEG4Timer::NewL(iProcessor, outputTimeIncrementResolution));
        }

    // enable pausing
    if ( ((iFullTranscoding)
         || (iProcessor->GetStartCutTime() > 0))
         && (iStartOfClipTransition == 0)
         && (iEndOfClipTransition == 0) )
        {
        // safe to enable pausing during transcoding:
        // only when doing full transcoding or cutting from the beginning, but not if transitions
        // rules out e.g. thumbnails, and codec-free cases
        iTransCoder->EnablePausing(ETrue);
        }

    VDASSERT(iTranscoderInitPending, 28);
    // complete request: wakes up RunL of this active object with 'error'
    TRequestStatus *status = &iStatus;
    User::RequestComplete(status, error);

    }
|
2359 |
|
2360 // --------------------------------------------------------- |
|
2361 // CVideoProcessor::MtroFatalError |
|
2362 // Called by transcoder to indicate a fatal error |
|
2363 // (other items were commented in a header). |
|
2364 // --------------------------------------------------------- |
|
2365 // |
|
2366 void CVideoProcessor::MtroFatalError(TInt aError) |
|
2367 { |
|
2368 PRINT((_L("CVideoProcessor::MtroFatalError() %d"), aError)); |
|
2369 |
|
2370 if (iFullTranscoding || iThumbnailMode || iTransitionEffect || (iProcessor->GetStartCutTime() > 0)) |
|
2371 { |
|
2372 // ok, this is fatal, continue the method |
|
2373 PRINT((_L("CVideoProcessor::MtroFatalError() transcoder is in use, this is fatal"))); |
|
2374 } |
|
2375 else |
|
2376 { |
|
2377 // transcoder not in use, ignore |
|
2378 PRINT((_L("CVideoProcessor::MtroFatalError() transcoder not in use, ignore"))); |
|
2379 return; |
|
2380 } |
|
2381 |
|
2382 // stop decoding |
|
2383 Stop(); |
|
2384 |
|
2385 if (!iThumbnailMode) |
|
2386 iMonitor->Error(aError); |
|
2387 else |
|
2388 iProcessor->NotifyThumbnailReady(aError); |
|
2389 |
|
2390 } |
|
2391 |
|
2392 // --------------------------------------------------------- |
|
2393 // CVideoProcessor::MtroSuspend |
|
2394 // Suspends processing |
|
2395 // (other items were commented in a header). |
|
2396 // --------------------------------------------------------- |
|
2397 // |
|
2398 void CVideoProcessor::MtroSuspend() |
|
2399 { |
|
2400 |
|
2401 PRINT((_L("CVideoProcessor::MtroSuspend()"))); |
|
2402 |
|
2403 if (iProcessingComplete || (!iProcessor->NeedTranscoderAnyMore())) |
|
2404 { |
|
2405 PRINT((_L("CVideoProcessor::MtroSuspend(), this clip done or no video at all to process any more, ignore"))); |
|
2406 return; |
|
2407 } |
|
2408 |
|
2409 Cancel(); |
|
2410 iDecoding = EFalse; |
|
2411 iDecodePending = EFalse; |
|
2412 iDecodingSuspended = EFalse; |
|
2413 |
|
2414 if (iTimer) |
|
2415 iTimer->CancelTimer(); |
|
2416 |
|
2417 iProcessor->SuspendProcessing(); |
|
2418 |
|
2419 // flush input queue |
|
2420 if (iBlock) |
|
2421 iQueue->ReturnBlock(iBlock); |
|
2422 iBlock = iQueue->ReadBlock(); |
|
2423 while (iBlock) |
|
2424 { |
|
2425 iQueue->ReturnBlock(iBlock); |
|
2426 iBlock = iQueue->ReadBlock(); |
|
2427 } |
|
2428 iBlockPos = 0; |
|
2429 |
|
2430 iTranscoderMode = EUndefined; // force to reset the mode when writing the next picture |
|
2431 |
|
2432 } |
|
2433 |
|
2434 // --------------------------------------------------------- |
|
2435 // CVideoProcessor::MtroResume |
|
2436 // Re-starts processing after pause |
|
2437 // (other items were commented in a header). |
|
2438 // --------------------------------------------------------- |
|
2439 // |
|
// Transcoder observer callback: re-starts processing after a pause.
// When this clip is already complete, only a RunL round is faked by
// setting iDecoding and activating the object. Otherwise the frame
// bookkeeping, data buffers and stream state are reset and processing
// is resumed from the last written frame.
void CVideoProcessor::MtroResume()
    {
    PRINT((_L("CVideoProcessor::MtroResume()")));

    if (iProcessingComplete)
        {
        PRINT((_L("CVideoProcessor::MtroResume(), processing of this clip completed, continue")));
        // fake RunL with this flag
        iDecoding = ETrue;
        if (!IsActive())
            {
            // Make us active
            PRINT((_L("CVideoProcessor::MtroResume() set active")));
            SetActive();
            iStatus = KRequestPending;
            }
        }
    // NOTE(review): when iProcessingComplete, execution intentionally
    // falls through to the checks below (no return here) — confirm this
    // is the intended behavior rather than a missing early return.

    if (!iProcessor->NeedTranscoderAnyMore())
        {
        PRINT((_L("CVideoProcessor::MtroResume(), no video to process any more, ignore")));
        return;
        }

    // flush frame info array and cancel timer
    if (iTimer)
        iTimer->CancelTimer();
    iFrameInfoArray.Reset();

    Start();

    // resume from the last written frame; report failure to the monitor
    TInt error = iProcessor->ResumeProcessing(iFrameNumber, iLastWrittenFrameNumber);
    if (error != KErrNone)
        iMonitor->Error(error);

    iNumberOfFrames = iProcessor->GetClipNumberOfFrames();
    iPreviousTimeStamp = TTimeIntervalMicroSeconds(-1);

    // clear input data buffer state so frames are re-read from the stream
    iDataLength = iCurrentFrameLength = 0;
    iDataFormat = EDataUnknown;

    iStreamEnd = iStreamEndRead = 0;

    // reset also delayed buffer; it will need to be anyway re-read
    delete iDelayedBuffer;
    iDelayedBuffer = 0;
    iDelayedWrite = EFalse;

    PRINT((_L("CVideoProcessor::MtroResume() - iFrameNumber = %d"), iFrameNumber));

    if (!IsActive())
        {
        // Make us active
        PRINT((_L("CVideoProcessor::MtroResume() set active")));
        SetActive();
        iStatus = KRequestPending;
        }

    PRINT((_L("CVideoProcessor::MtroResume() out")));

    }
|
2501 |
|
2502 // --------------------------------------------------------- |
|
2503 // CVideoProcessor::MtroReturnCodedBuffer |
|
2504 // Called by transcoder to return bitstream buffer |
|
2505 // (other items were commented in a header). |
|
2506 // --------------------------------------------------------- |
|
2507 // |
|
// Transcoder observer callback: the transcoder has consumed the coded
// input buffer previously handed to it and is returning it. Discards the
// consumed frame bytes from iDataBuffer, watches for encoder stalls via
// the frameskip timer, and either completes this active object's pending
// request to continue decoding or suspends decoding when the processing
// queue is full.
void CVideoProcessor::MtroReturnCodedBuffer(CCMRMediaBuffer* aBuffer)
    {

    // only the single shared media buffer is ever handed out
    VDASSERT(aBuffer == iMediaBuffer, 29);

    iIsThumbFrameBeingCopied = EFalse;
    iDecoderSpecificInfoSent = ETrue;

#ifdef _DEBUG
    TTimeIntervalMicroSeconds ts = aBuffer->TimeStamp();

    PRINT((_L("CVideoProcessor::MtroReturnCodedBuffer() timeStamp = %d ms"),
        I64INT( ts.Int64() ) / 1000 ));

#endif

    if (!iFirstColorTransitionFrame)
        {
        // Throw away the data for this frame: shift the remaining bytes
        // to the start of the buffer and shrink the fill level
        VDASSERT(iDataLength >= iCurrentFrameLength,4);
        Mem::Copy(iDataBuffer, iDataBuffer + iCurrentFrameLength,
            iDataLength - iCurrentFrameLength);
        iDataLength = iDataLength - iCurrentFrameLength;
        iCurrentFrameLength = 0;
        }

    if (!iThumbnailMode)
        {

        // check if the next frame in queue is waiting to be encoded
        // and start timer to detect possible frameskip
        if ( IsNextFrameBeingEncoded() )
            {
            if ( !iTimer->IsPending() )
                iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
            }

        // back-pressure: stop feeding the transcoder while its processing
        // queue is full; decoding resumes when pictures come back
        if (iFrameInfoArray.Count() >= iMaxItemsInProcessingQueue)
            {
            PRINT((_L("CVideoProcessor::MtroReturnCodedBuffer() - %d items in queue, suspend decoding"),
                iFrameInfoArray.Count() ));

            iDecodingSuspended = ETrue;

            return;
            }

        VDASSERT(IsActive(), 40);
        VDASSERT(iDecodePending, 41);

        // complete request
        TRequestStatus *status = &iStatus;
        User::RequestComplete(status, KErrNone);
        }
    else if (iDataFormat == EDataAVC)
        {

        VDASSERT(IsActive(), 40);
        VDASSERT(iDecodePending, 41);

        // NOTE: it would make sense to call AsyncStopL() here,
        // but at least in WINSCW it didn't have any effect
        //if (iThumbFramesToWrite == 0)
        //{
        //iTransCoder->AsyncStopL();
        //iTranscoderStarted = EFalse;
        //}

        // only complete if the request is still outstanding
        if (iStatus == KRequestPending)
            {
            PRINT((_L("CVideoProcessor::MtroReturnCodedBuffer() - completing request")));
            // complete request
            TRequestStatus *status = &iStatus;
            User::RequestComplete(status, KErrNone);
            }
        }
    }
|
2585 |
|
2586 // --------------------------------------------------------- |
|
2587 // CVideoProcessor::MtroSetInputFrameRate |
|
2588 // Called by transcoder to request inout framerate |
|
2589 // (other items were commented in a header). |
|
2590 // --------------------------------------------------------- |
|
2591 // |
|
2592 void CVideoProcessor::MtroSetInputFrameRate(TReal& aRate) |
|
2593 { |
|
2594 TReal rate = iProcessor->GetVideoClipFrameRate(); |
|
2595 |
|
2596 if ( rate <= 15.0 ) |
|
2597 { |
|
2598 rate = 15.0; |
|
2599 } |
|
2600 else |
|
2601 { |
|
2602 rate = 30.0; |
|
2603 } |
|
2604 |
|
2605 aRate = rate; |
|
2606 } |
|
2607 |
|
2608 // --------------------------------------------------------- |
|
2609 // CVideoProcessor::MtroAsyncStopComplete |
|
2610 // Called by transcoder after async. stop is complete |
|
2611 // (other items were commented in a header). |
|
2612 // --------------------------------------------------------- |
|
2613 // |
|
// Transcoder observer callback: asynchronous stop has completed.
// No action is required here beyond debug logging.
void CVideoProcessor::MtroAsyncStopComplete()
    {
    PRINT((_L("CVideoProcessor::MtroAsyncStopComplete()")));
    }
|
2618 |
|
2619 // --------------------------------------------------------- |
|
2620 // CVideoProcessor::MtroPictureFromTranscoder |
|
2621 // Called by transcoder to return a decoded picture |
|
2622 // (other items were commented in a header). |
|
2623 // --------------------------------------------------------- |
|
2624 // |
|
// Transcoder observer callback: a decoded picture is available.
// In thumbnail mode the picture is forwarded to the thumbnail handler.
// Otherwise the picture is matched by timestamp against the frame
// bookkeeping array: stale decode-only / unmodified intermediate frames
// ahead of it are dropped, unexpected pictures are returned immediately,
// decode-only frames are handed to their own handler, and frames headed
// for re-encoding get the color effect / transition applied before being
// sent back to the transcoder's encoder.
void CVideoProcessor::MtroPictureFromTranscoder(TTRVideoPicture* aPicture)
    {

    TTimeIntervalMicroSeconds decodedTs = aPicture->iTimestamp;

    PRINT((_L("CVideoProcessor::MtroPictureFromTranscoder(), timestamp %d ms"),
        I64INT( decodedTs.Int64() ) / 1000 ));

    if (iThumbnailMode)
        {
        iThumbDecoded = ETrue;

        // handle thumbnail
        HandleThumbnailFromTranscoder(aPicture);

        return;
        }

    // search the decoded frame from list
    TInt index;
    for (index = 0; index < iFrameInfoArray.Count(); )
        {
        PRINT((_L("CVideoProcessor::MtroPictureFromTranscoder(), checking frame with index %d"), index));

        // bookkeeping timestamps are in ticks; convert to microseconds
        // to compare with the transcoder's picture timestamp
        TTimeIntervalMicroSeconds ts =
            (iProcessor->GetVideoTimeInMsFromTicks(iFrameInfoArray[index].iTimeStamp, EFalse)) * 1000;

        if (ts < decodedTs && ( (iFrameInfoArray[index].iEncodeFrame == EFalse) ||
            (iFrameInfoArray[index].iTranscoderMode == EFullWithIM &&
             iFrameInfoArray[index].iModificationApplied == 0) ) )
            {
            // if there are decode-only or transcoding w/intermediate modification
            // frames in the queue before this one, remove those
            PRINT((_L("CVideoProcessor::MtroPictureFromTranscoder(), removing frame with timestamp %d ms"),
                I64INT( ts.Int64() ) / 1000 ));

            iFrameInfoArray.Remove(index);
            // don't increment index
            continue;
            }

        if (ts == decodedTs)
            {
            PRINT((_L("CVideoProcessor::MtroPictureFromTranscoder(), found decoded frame at index %d"), index));
            break;
            }

        index++;
        }

    // If decoded frame is unexpected, i.e. it is not found from book-keeping,
    // or it is not an intermediate modification frame, return frame here
    // and continue
    if ( index >= iFrameInfoArray.Count() ||
         ( iFrameInfoArray[index].iEncodeFrame == 1 &&
           iFrameInfoArray[index].iTranscoderMode != EFullWithIM ) )
        {
        PRINT((_L("CVideoProcessor::MtroPictureFromTranscoder(), unexpected decoded frame, iTranscoderMode %d"), iTranscoderMode));
        // send picture back to transcoder
        TInt error = KErrNone;
        TRAP( error, iTransCoder->SendPictureToTranscoderL(aPicture) );
        if ( error != KErrNone )
            {
            iMonitor->Error(error);
            }
        return;
        }

    if (iFrameInfoArray[index].iEncodeFrame == EFalse)
        {
        // handle decode-only frame
        HandleDecodeOnlyFrameFromTranscoder(aPicture, index);
        return;
        }

    // check color effect
    TInt colorEffect = TColorEffect2TInt( iProcessor->GetColorEffect() );
    if (colorEffect != 0/*None*/)
        {
        // U,V value for the color toning
        TInt colorToneU;
        TInt colorToneV;
        iProcessor->GetColorTone((TInt&)colorToneU, (TInt&)colorToneV);
        // apply color effect in-place on the raw picture data
        ApplySpecialEffect( colorEffect, const_cast<TUint8*>(aPicture->iRawData->Ptr()), colorToneU, colorToneV );

        }

    if(iFrameInfoArray[index].iTransitionFrame == 1)
        {
        // apply transition to frame
        HandleTransitionFrameFromTranscoder(aPicture, index);
        }

    // mark so this frame is not treated as pending modification again
    iFrameInfoArray[index].iModificationApplied = ETrue;

    // send picture back to transcoder for encoding
    TInt error = KErrNone;
    TRAP( error, iTransCoder->SendPictureToTranscoderL(aPicture) );
    if ( error != KErrNone )
        {
        iMonitor->Error(error);
        return;
        }

    // check if the next frame is waiting to be encoded, set timer if so
    if ( IsNextFrameBeingEncoded() )
        {
        if ( !iTimer->IsPending() )
            iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
        }

    }
|
2738 |
|
2739 |
|
2740 // --------------------------------------------------------- |
|
2741 // CVideoProcessor::HandleThumbnailFromTranscoder |
|
2742 // Handle thumbnail frame |
|
2743 // (other items were commented in a header). |
|
2744 // --------------------------------------------------------- |
|
2745 // |
|
void CVideoProcessor::HandleThumbnailFromTranscoder(TTRVideoPicture* aPicture)
    {
    // Copies a decoded thumbnail frame from the transcoder into
    // iFrameBuffer, returns the picture buffer to the transcoder, and
    // completes this active object's pending request so RunL can continue.
    //
    // aPicture  decoded picture owned by the transcoder; always handed
    //           back via SendPictureToTranscoderL before returning

    TInt error = KErrNone;

    PRINT((_L("CVideoProcessor::HandleThumbnailFromTranscoder() begin")));

    if (iProcessingComplete)
        {
        // if requested thumbnail has been done already,
        // just release picture and return
        PRINT((_L("CVideoProcessor::HandleThumbnailFromTranscoder(), thumb already finished, returning")));

        TRAP( error, iTransCoder->SendPictureToTranscoderL(aPicture) );
        if ( error != KErrNone )
            {
            iMonitor->Error(error);
            return;
            }
        return;
        }

    // YUV 4:2:0 frame size: luma plane plus half of it for the two
    // chroma planes
    TInt yuvLength = iVideoWidth*iVideoHeight;
    yuvLength += (yuvLength >> 1);
    // copy to iFrameBuffer
    Mem::Copy(iFrameBuffer, aPicture->iRawData->Ptr(), yuvLength);

    // release picture back to the transcoder
    TRAP( error, iTransCoder->SendPictureToTranscoderL(aPicture) );
    if ( error != KErrNone )
        {
        // thumbnail data was already copied, so report via the
        // thumbnail-ready path rather than the generic monitor
        iProcessor->NotifyThumbnailReady(error);
        return;
        }

    VDASSERT(iDecodePending, 33);
    VDASSERT(IsActive(), 150);

    if (iStatus == KRequestPending)
        {

        PRINT((_L("CVideoProcessor::HandleThumbnailFromTranscoder(), complete request")));
        // complete request so the scheduler runs RunL
        TRequestStatus *status = &iStatus;
        User::RequestComplete(status, KErrNone);
        }

    PRINT((_L("CVideoProcessor::HandleThumbnailFromTranscoder() end")));
    }
|
2795 |
|
2796 // --------------------------------------------------------- |
|
2797 // CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder |
|
2798 // Handle decode-only frame |
|
2799 // (other items were commented in a header). |
|
2800 // --------------------------------------------------------- |
|
2801 // |
|
void CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder(TTRVideoPicture* aPicture, TInt aIndex)
    {
    // Handles a frame that was sent to the transcoder in decode-only mode:
    // for an end-of-clip color transition the decoded YUV data is saved to
    // file (for later blending with the next clip), then the frame's
    // bookkeeping entry is removed and the picture buffer is released.
    // Also drives the processing state machine: completes the request at
    // stream end, resumes suspended decoding, or re-arms the encode timer.
    //
    // aPicture  decoded picture owned by the transcoder
    // aIndex    index of the frame's entry in iFrameInfoArray

    VDASSERT(iFrameInfoArray[aIndex].iTranscoderMode == EDecodeOnly, 43);

    if ( iFrameInfoArray[aIndex].iTransitionFrame &&
         iFrameInfoArray[aIndex].iTransitionPosition == EPositionEndOfClip )
        {
        if ( iEndTransitionColor == EColorTransition )
            {
            // Save decoded frame to file

            // YUV 4:2:0 frame size for the movie resolution
            TSize a = iProcessor->GetMovieResolution();
            TInt yuvLength = a.iWidth*a.iHeight;
            yuvLength += (yuvLength>>1);
            TPtr8 ptr(0,0);
            TUint8* tmpBuf=0;

            ptr.Set( *aPicture->iRawData );
            tmpBuf = const_cast<TUint8*>(aPicture->iRawData->Ptr());

            TInt colorEffect = TColorEffect2TInt( iProcessor->GetColorEffect() );
            if (colorEffect != 0 /*None*/)
                {
                // U,V value for the color toning
                TInt colorToneU;
                TInt colorToneV;
                iProcessor->GetColorTone((TInt&)colorToneU, (TInt&)colorToneV);
                // apply special effect in place on the decoded data
                ApplySpecialEffect( colorEffect, tmpBuf, colorToneU, colorToneV );
                }

            TInt frameDuration = GetFrameDuration(iFrameInfoArray[aIndex].iFrameNumber);

            if (frameDuration <= 0)
                {
                // no stored duration: derive one frame's duration from the
                // clip frame rate and convert it to timescale ticks
                TReal frameRate = iProcessor->GetVideoClipFrameRate();
                VDASSERT(frameRate > 0.0, 107);
                TInt timeScale = iProcessor->GetVideoClipTimeScale();
                TInt64 durationMs = TInt64( ( 1000.0 / frameRate ) + 0.5 );

                // in ticks
                frameDuration = TInt( ( (TReal)durationMs * (TReal)timeScale / 1000.0 ) + 0.5 );
                }

            TInt error = iProcessor->SaveVideoFrameToFile( ptr, frameDuration, iFrameInfoArray[aIndex].iTimeStamp );
            if ( error != KErrNone )
                {
                PRINT((_L("CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder() - SaveVideoFrameToFile failed")));
                iMonitor->Error(error);
                return;
                }
            }
        }

    iFrameInfoArray.Remove(aIndex);

    PRINT((_L("CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder() - removed decode only pic, %d items in queue"),
        iFrameInfoArray.Count()));

    // release picture
    TInt error = KErrNone;
    TRAP( error, iTransCoder->SendPictureToTranscoderL(aPicture) );
    if ( error != KErrNone )
        {
        iMonitor->Error(error);
        return;
        }

    if (iStreamEndRead && iFrameInfoArray.Count() == 0 )
        {
        // last queued frame of the stream has been handled
        PRINT((_L("CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder() - stream end read, no frames left")));
        if (!IsActive())
            {
            SetActive();
            iStatus = KRequestPending;
            }
        iTimer->CancelTimer();
        iProcessingComplete = ETrue;
        // activate object to end processing
        TRequestStatus *status = &iStatus;
        User::RequestComplete(status, KErrNone);
        return;
        }

    if (iDecodingSuspended && !iStreamEndRead)
        {
        // resume decoding once the processing queue has drained enough
        if (iFrameInfoArray.Count() < iMaxItemsInProcessingQueue && !iDelayedWrite)
            {
            PRINT((_L("CVideoProcessor::HandleDecodeOnlyFrameFromTranscoder() - Resume decoding")));
            iDecodingSuspended = EFalse;
            // activate object to start decoding
            TRequestStatus *status = &iStatus;
            User::RequestComplete(status, KErrNone);
            return;
            }
        }

    // check if the next frame is waiting to be encoded, set timer if so
    if ( IsNextFrameBeingEncoded() )
        {
        if ( !iTimer->IsPending() )
            iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
        }

    return;
    }
|
2910 |
|
2911 |
|
2912 // --------------------------------------------------------- |
|
2913 // CVideoProcessor::HandleTransitionFrameFromTranscoder |
|
2914 // Handle transition frame |
|
2915 // (other items were commented in a header). |
|
2916 // --------------------------------------------------------- |
|
2917 // |
|
void CVideoProcessor::HandleTransitionFrameFromTranscoder(TTRVideoPicture* aPicture, TInt aIndex)
    {
    // Applies the transition effect for this frame in the spatial domain,
    // i.e. directly to the YUV data held in the transcoder's picture
    // buffer. A start-of-clip color transition blends the frame with a
    // frame of the previous clip read from file (crossfade or sliding
    // wipe); every other case applies a fade to/from a solid color.
    //
    // aPicture  decoded picture whose iRawData is modified in place
    // aIndex    index of the frame's entry in iFrameInfoArray

    if ( iFrameInfoArray[aIndex].iTransitionPosition == EPositionStartOfClip &&
         iStartTransitionColor == EColorTransition )
        {
        // Do blending transition: wipe / crossfade

        // YUV 4:2:0 frame size for the movie resolution
        TSize a = iProcessor->GetMovieResolution();
        TInt yuvLength = a.iWidth*a.iHeight;
        yuvLength += (yuvLength>>1);

        if (iFrameInfoArray[aIndex].iRepeatFrame == 0)
            {
            // first pass for this frame: lazily allocate the work buffers
            if( !iColorTransitionBuffer )
                {
                iColorTransitionBuffer = (TUint8*)User::Alloc( yuvLength );
                if (!iColorTransitionBuffer)
                    {
                    iMonitor->Error(KErrNoMemory);
                    return;
                    }
                }

            if( !iOrigPreviousYUVBuffer )
                {
                iOrigPreviousYUVBuffer = (TUint8*)User::Alloc( yuvLength );
                if (!iOrigPreviousYUVBuffer)
                    {
                    iMonitor->Error(KErrNoMemory);
                    return;
                    }
                }

            TPtr8 ptr( iColorTransitionBuffer, 0, yuvLength );

            if ( iProcessor->GetVideoFrameFromFile( ptr, yuvLength, iFrameDuration, iTimeStamp ) != KErrNone )
                //|| iFrameDuration == 0 || iTimeStamp == 0 )
                {
                // failure in reading frame data from previous clip
                // continue using the current frame data
                Mem::Copy( iColorTransitionBuffer, aPicture->iRawData->Ptr(), yuvLength );
                }
            else
                {
                // buffer frame from previous clip (read from file) so a
                // repeated transition frame can reuse the unmodified copy
                Mem::Copy( iOrigPreviousYUVBuffer, iColorTransitionBuffer, yuvLength );
                if ( iStartOfClipTransition == (TInt)EVedMiddleTransitionEffectCrossfade )
                    {
                    // Target frame is the one read from file, iColorTransitionBuffer
                    ApplyBlendingTransitionEffect( iColorTransitionBuffer, const_cast<TUint8*>(aPicture->iRawData->Ptr()),
                        0 /* repeatFrame */, iFrameInfoArray[aIndex].iTransitionFrameNumber);
                    }
                else
                    {
                    // Target frame is the one read from file, iColorTransitionBuffer
                    ApplySlidingTransitionEffect( iColorTransitionBuffer, const_cast<TUint8*>(aPicture->iRawData->Ptr()), (TVedMiddleTransitionEffect)iStartOfClipTransition,
                        0 /* repeatFrame */, iFrameInfoArray[aIndex].iTransitionFrameNumber);
                    }
                // copy frame from edited buffer to transcoder buffer
                Mem::Copy( const_cast<TUint8*>(aPicture->iRawData->Ptr()), iColorTransitionBuffer, yuvLength );
                }
            }
        else
            {
            // repeatFrame: blend against the buffered original copy of the
            // previous clip's frame saved on the first pass

            if ( iStartOfClipTransition == (TInt)EVedMiddleTransitionEffectCrossfade )
                {
                ApplyBlendingTransitionEffect( iOrigPreviousYUVBuffer, const_cast<TUint8*>(aPicture->iRawData->Ptr()),
                    1 /* repeatFrame */, iFrameInfoArray[aIndex].iTransitionFrameNumber);
                }
            else
                {
                ApplySlidingTransitionEffect( iOrigPreviousYUVBuffer, const_cast<TUint8*>(aPicture->iRawData->Ptr()), (TVedMiddleTransitionEffect)iStartOfClipTransition,
                    1 /* repeatFrame */, iFrameInfoArray[aIndex].iTransitionFrameNumber );
                }
            // copy frame from edited buffer to transcoder buffer
            Mem::Copy( const_cast<TUint8*>(aPicture->iRawData->Ptr()), iOrigPreviousYUVBuffer, yuvLength );
            }
        }
    else
        {
        // apply transition effect in spatial domain (to yuv data in encoder buffer)

        // Do fading transition
        ApplyFadingTransitionEffect(const_cast<TUint8*>(aPicture->iRawData->Ptr()), iFrameInfoArray[aIndex].iTransitionPosition, iFrameInfoArray[aIndex].iTransitionColor,
            iFrameInfoArray[aIndex].iTransitionFrameNumber);
        }

    }
|
3010 |
|
3011 // --------------------------------------------------------- |
|
3012 // CVideoProcessor::ProcessThumb |
|
3013 // Starts thumbnail generation |
|
3014 // (other items were commented in a header). |
|
3015 // --------------------------------------------------------- |
|
3016 // |
|
3017 TInt CVideoProcessor::ProcessThumb(MThumbnailObserver* aThumbObserver, TInt aFrameIndex, TInt aStartFrameIndex, TVedTranscodeFactor* aFactor) |
|
3018 { |
|
3019 TInt error; |
|
3020 // TInt goodFrame = 0; |
|
3021 // TInt frameSkip = 10; |
|
3022 // TInt frameNumber = aStartFrameIndex; |
|
3023 TPtrC8 inputPtr; |
|
3024 TPtr8 outputPtr(0,0); |
|
3025 |
|
3026 iThumbObserver = aThumbObserver; |
|
3027 iThumbFrameIndex = aFrameIndex; |
|
3028 iThumbFrameNumber = aStartFrameIndex; |
|
3029 iFramesToSkip = 0; |
|
3030 iNumThumbFrameSkips = 0; |
|
3031 iPreviousTimeStamp = TTimeIntervalMicroSeconds(-1); |
|
3032 iProcessingComplete = EFalse; |
|
3033 |
|
3034 iThumbFramesToWrite = iProcessor->GetOutputNumberOfFrames() - iThumbFrameNumber; |
|
3035 |
|
3036 // get transcode factor to determine input stream type |
|
3037 TRAP(error, GetTranscodeFactorL(*aFactor)); |
|
3038 if (error != KErrNone) |
|
3039 return error; |
|
3040 |
|
3041 TVedVideoType inType; |
|
3042 if (aFactor->iStreamType == EVedVideoBitstreamModeH263) |
|
3043 inType = EVedVideoTypeH263Profile0Level10; |
|
3044 |
|
3045 else if (aFactor->iStreamType == EVedVideoBitstreamModeAVC) |
|
3046 inType = EVedVideoTypeAVCBaselineProfile; |
|
3047 |
|
3048 else |
|
3049 inType = EVedVideoTypeMPEG4SimpleProfile; |
|
3050 |
|
3051 if (aFactor->iStreamType == EVedVideoTypeUnrecognized || |
|
3052 aFactor->iStreamType == EVedVideoTypeNoVideo) |
|
3053 return KErrorCode; |
|
3054 |
|
3055 iDecoding = ETrue; |
|
3056 |
|
3057 // first frame is now read in iDataBuffer, initialize transcoder |
|
3058 TRAP(error, CreateAndInitializeTranscoderL(inType, CTRTranscoder::EDecoding)) |
|
3059 if (error != KErrNone) |
|
3060 return error; |
|
3061 |
|
3062 // wait for initialisation to complete => RunL |
|
3063 |
|
3064 return KErrNone; |
|
3065 |
|
3066 } |
|
3067 |
|
3068 // --------------------------------------------------------- |
|
3069 // CVideoProcessor::ProcessThumb |
|
3070 // Processes a thumbnail frame internally |
|
3071 // (other items were commented in a header). |
|
3072 // --------------------------------------------------------- |
|
3073 // |
|
void CVideoProcessor::ProcessThumb(TBool aFirstFrame)
    {
    // Runs one step of thumbnail generation. On the first call the frame
    // already read into iDataBuffer is sent to the transcoder for
    // decoding; on subsequent calls (after each decoded frame) this
    // decides whether the requested thumbnail is done, whether more
    // frames must be skipped to find a good-quality one, or whether the
    // next frame should be read and written to the decoder.
    //
    // aFirstFrame  ETrue on the first call after transcoder initialisation

    PRINT((_L("CVideoProcessor::ProcessThumb() - begin()")));

    iThumbDecoded = EFalse;

    if (aFirstFrame)
        {
        // frame read in iDataBuffer, decode
        CCMRMediaBuffer::TBufferType bt =
            (iDataFormat == EDataH263) ? CCMRMediaBuffer::EVideoH263 : CCMRMediaBuffer::EVideoMPEG4;
        // frame index relative to the start of the current clip
        TInt index = iThumbFrameNumber - ( iProcessor->GetOutputNumberOfFrames() -
                                           iProcessor->GetClipNumberOfFrames() );
        TTimeIntervalMicroSeconds ts =
            TTimeIntervalMicroSeconds(iProcessor->GetVideoTimeInMsFromTicks(iProcessor->VideoFrameTimeStamp(index), ETrue) * TInt64(1000) );

        // Get the AVC bit stream and add NAL headers
        if(iDataFormat == EDataAVC)
            {
            TInt error = KErrNone;

            // insert dec.config. record in the beginning of buffer
            TRAP( error, InsertDecoderSpecificInfoL() );
            if (error != KErrNone)
                {
                iProcessor->NotifyThumbnailReady(error);
                return;
                }

            PRINT((_L("CVideoProcessor::ProcessThumb() - ProcessAVCBitStream()")));

            TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength);
            TRAP( error, iAvcEdit->ProcessAVCBitStreamL((TDes8&)(ptr), (TInt&)(iCurrentFrameLength),
                iProcessor->GetDecoderSpecificInfoSize(), ETrue ) );

            if (error != KErrNone)
                {
                iProcessor->NotifyThumbnailReady(error);
                return;
                }
            iDataLength = iCurrentFrameLength;

            }

        // insert VOL header to beginning of buffer
        if (iDataFormat == EDataMPEG4)
            {
            TRAPD( error, InsertDecoderSpecificInfoL() );
            if (error != KErrNone)
                {
                iProcessor->NotifyThumbnailReady(error);
                return;
                }
            }

        iMediaBuffer->Set( TPtrC8(iDataBuffer, iBufferLength),
                           bt,
                           iCurrentFrameLength,
                           ETrue, // keyFrame
                           ts
                         );

        iPreviousTimeStamp = ts;

        // activate before writing: the transcoder completes via callbacks
        iIsThumbFrameBeingCopied = ETrue;
        iDecodePending = ETrue;
        if (!IsActive())
            {
            SetActive();
            iStatus = KRequestPending;
            }

        PRINT((_L("CVideoProcessor::ProcessThumb() - WriteCodedBuffer, thumb frame #%d, timestamp %d ms"),
            iThumbFrameNumber, I64INT( ts.Int64() ) / 1000 ));

        TRAPD( err, iTransCoder->WriteCodedBufferL(iMediaBuffer) );
        if (err != KErrNone)
            {
            // ready
            FinalizeThumb(err);
            return;
            }
        iThumbFramesToWrite--;

        return;
        }

    if (iThumbFrameIndex == 0)
        {
        // first frame was requested and it is now decoded => ready
        FinalizeThumb(KErrNone);
        return;
        }

    iThumbFrameNumber++;

    if (iDataFormat == EDataAVC)
        {
        // In AVC case, we have to stop decoding before the very last
        // frames are decoded, since for some reason the transcoder/decoder
        // does not decode those frames

        // get max number of buffered frames according to spec
        TInt buffered = iAvcEdit->GetMaxAVCFrameBuffering(iInputAVCLevel, TSize(iVideoWidth, iVideoHeight));

        if (iThumbFrameNumber > iProcessor->GetOutputNumberOfFrames() - 1 - buffered )
            {
            // ready
            FinalizeThumb(KErrNone);
            return;
            }
        }

    if (iThumbFrameIndex < 0)
        {
        // negative index: search for a frame with acceptable quality
        if (iFramesToSkip == 0)
            {
            PRINT((_L("CVideoProcessor::ProcessThumb() frameskip done %d times"), iNumThumbFrameSkips));

            // limit the number of frame skip cycles to 3, because with
            // near-black or near-white videos we may never find a good thumb.
            // => max. 30 frames are decoded to get the thumb

            // check quality & frame skip cycles
            if ( CheckFrameQuality(iFrameBuffer) || iNumThumbFrameSkips >= 3 )
                {
                // quality ok or searched long enough, return
                FinalizeThumb(KErrNone);
                return;
                }
            iFramesToSkip = 10;
            iNumThumbFrameSkips++;
            }
        else
            iFramesToSkip--;

        // read new frame & decode
        }

    if (iThumbFrameIndex > 0)
        {
        if (iThumbFrameNumber > iThumbFrameIndex)
            {
            // requested frame reached => ready
            FinalizeThumb(KErrNone);
            return;
            }
        // read new frame & decode
        }

    if (iIsThumbFrameBeingCopied)
        {
        PRINT((_L("CVideoProcessor::ProcessThumb() - thumb being copied, activate")));
        // Re-activate to wait for MtroReturnCodedBuffer
        iDecodePending = ETrue;
        if (!IsActive())
            {
            SetActive();
            iStatus = KRequestPending;
            }
        }
    else
        {
        PRINT((_L("CVideoProcessor::ProcessThumb() - read and write new")));
        // send new frame for decoding
        ReadAndWriteThumbFrame();
        }

    PRINT((_L("CVideoProcessor::ProcessThumb() - end")));

    }
|
3246 |
|
3247 // --------------------------------------------------------- |
|
3248 // CVideoProcessor::ReadAndWriteThumbFrame |
|
3249 // Reads a new frame to input queue and sends it to transcoder |
|
3250 // (other items were commented in a header). |
|
3251 // --------------------------------------------------------- |
|
3252 // |
|
3253 void CVideoProcessor::ReadAndWriteThumbFrame() |
|
3254 { |
|
3255 |
|
3256 PRINT((_L("CVideoProcessor::ReadAndWriteThumbFrame() - begin, thumb frames to write %d"), |
|
3257 iThumbFramesToWrite)); |
|
3258 |
|
3259 TInt error = KErrNone; |
|
3260 |
|
3261 if ( iThumbFramesToWrite ) |
|
3262 { |
|
3263 // read new frame to input queue |
|
3264 if(iThumbFrameNumber < (iProcessor->GetOutputNumberOfFrames())) // do not read last frame (already read!) |
|
3265 { |
|
3266 CMP4Demux *demux = (CMP4Demux *)iProcessor->GetDemux(); |
|
3267 error = demux->ReadVideoFrames(1); |
|
3268 if (error != KErrNone) |
|
3269 { |
|
3270 FinalizeThumb(error); |
|
3271 return; |
|
3272 } |
|
3273 } |
|
3274 else |
|
3275 { |
|
3276 // no frames left, return |
|
3277 FinalizeThumb(KErrNone); |
|
3278 return; |
|
3279 } |
|
3280 |
|
3281 iDataLength = 0; |
|
3282 iCurrentFrameLength = 0; |
|
3283 iDataFormat = EDataUnknown; |
|
3284 |
|
3285 if (ReadFrame()) |
|
3286 { |
|
3287 // frame read in iDataBuffer, decode |
|
3288 CCMRMediaBuffer::TBufferType bt = |
|
3289 (iDataFormat == EDataH263) ? CCMRMediaBuffer::EVideoH263 : CCMRMediaBuffer::EVideoMPEG4; |
|
3290 |
|
3291 TInt index = iThumbFrameNumber - ( iProcessor->GetOutputNumberOfFrames() - |
|
3292 iProcessor->GetClipNumberOfFrames() ); |
|
3293 |
|
3294 TTimeIntervalMicroSeconds ts = |
|
3295 TTimeIntervalMicroSeconds(iProcessor->GetVideoTimeInMsFromTicks(iProcessor->VideoFrameTimeStamp(index), ETrue) * TInt64(1000) ); |
|
3296 |
|
3297 if (ts <= iPreviousTimeStamp) |
|
3298 { |
|
3299 // adjust timestamp so that its bigger than ts of previous frame |
|
3300 TReal frameRate = iProcessor->GetVideoClipFrameRate(); |
|
3301 VDASSERT(frameRate > 0.0, 108); |
|
3302 TInt64 durationMs = TInt64( ( 1000.0 / frameRate ) + 0.5 ); |
|
3303 durationMs /= 2; // add half the duration of one frame |
|
3304 |
|
3305 ts = TTimeIntervalMicroSeconds( iPreviousTimeStamp.Int64() + durationMs*1000 ); |
|
3306 } |
|
3307 |
|
3308 iPreviousTimeStamp = ts; |
|
3309 |
|
3310 // Get the AVC bit stream and add NAL headers |
|
3311 if(iDataFormat == EDataAVC) |
|
3312 { |
|
3313 TPtr8 ptr(iDataBuffer, iCurrentFrameLength, iBufferLength); |
|
3314 TRAPD( error, iAvcEdit->ProcessAVCBitStreamL((TDes8&)(ptr), (TInt&)(iCurrentFrameLength), |
|
3315 iProcessor->GetDecoderSpecificInfoSize(), EFalse ) ); |
|
3316 |
|
3317 if (error != KErrNone) |
|
3318 { |
|
3319 FinalizeThumb(error); |
|
3320 return; |
|
3321 } |
|
3322 iDataLength = iCurrentFrameLength; |
|
3323 } |
|
3324 |
|
3325 iMediaBuffer->Set( TPtrC8(iDataBuffer, iBufferLength), |
|
3326 bt, |
|
3327 iCurrentFrameLength, |
|
3328 iProcessor->GetVideoFrameType(index), |
|
3329 ts ); |
|
3330 |
|
3331 iIsThumbFrameBeingCopied = ETrue; |
|
3332 iDecodePending = ETrue; |
|
3333 if (!IsActive()) |
|
3334 { |
|
3335 SetActive(); |
|
3336 iStatus = KRequestPending; |
|
3337 } |
|
3338 |
|
3339 PRINT((_L("CVideoProcessor::ProcessThumb() - WriteCodedBuffer, thumb frame #%d, timestamp %d ms"), |
|
3340 iThumbFrameNumber, I64INT( ts.Int64() ) / 1000 )); |
|
3341 |
|
3342 TRAPD( err, iTransCoder->WriteCodedBufferL(iMediaBuffer) ); |
|
3343 if (err != KErrNone) |
|
3344 { |
|
3345 FinalizeThumb(err); |
|
3346 } |
|
3347 iThumbFramesToWrite--; |
|
3348 return; |
|
3349 } |
|
3350 } |
|
3351 |
|
3352 else |
|
3353 { |
|
3354 if (iDataFormat == EDataAVC) |
|
3355 { |
|
3356 PRINT((_L("CVideoProcessor::ReadAndWriteThumbFrame() - all frames written, wait for output"))); |
|
3357 // all necessary frames written to decoder, now wait for output frames |
|
3358 iDecodePending = ETrue; |
|
3359 if (!IsActive()) |
|
3360 { |
|
3361 SetActive(); |
|
3362 iStatus = KRequestPending; |
|
3363 } |
|
3364 } |
|
3365 else |
|
3366 { |
|
3367 FinalizeThumb(KErrNone); |
|
3368 } |
|
3369 } |
|
3370 |
|
3371 PRINT((_L("CVideoProcessor::ReadAndWriteThumbFrame() - end"))); |
|
3372 } |
|
3373 |
|
3374 // --------------------------------------------------------- |
|
3375 // CVideoProcessor::FinalizeThumb |
|
3376 // (other items were commented in a header). |
|
3377 // --------------------------------------------------------- |
|
3378 // |
|
3379 void CVideoProcessor::FinalizeThumb(TInt aError) |
|
3380 { |
|
3381 iProcessingComplete = ETrue; |
|
3382 if (iTranscoderStarted) |
|
3383 { |
|
3384 TRAPD( err, iTransCoder->StopL() ); |
|
3385 if (err != KErrNone) { } |
|
3386 iTranscoderStarted = EFalse; |
|
3387 } |
|
3388 iProcessor->NotifyThumbnailReady(aError); |
|
3389 } |
|
3390 |
|
3391 // --------------------------------------------------------- |
|
3392 // CVideoProcessor::FetchThumb |
|
3393 // For getting a pointer to YUV thumbnail frame |
|
3394 // (other items were commented in a header). |
|
3395 // --------------------------------------------------------- |
|
3396 // |
|
3397 TInt CVideoProcessor::FetchThumb(TUint8** aYUVDataPtr) |
|
3398 { |
|
3399 *aYUVDataPtr = iFrameBuffer; |
|
3400 |
|
3401 return KErrNone; |
|
3402 } |
|
3403 |
|
3404 |
|
3405 // --------------------------------------------------------- |
|
3406 // CVideoProcessor::GetTranscodeFactorL |
|
3407 // Gets the transcode factor from the current clip |
|
3408 // (other items were commented in a header). |
|
3409 // --------------------------------------------------------- |
|
3410 // |
|
TInt CVideoProcessor::GetTranscodeFactorL(TVedTranscodeFactor& aFactor)
    {
    // Determines the transcode factor (time increment resolution and
    // bitstream mode) of the current clip by reading its first video
    // frame and inspecting the header data.
    //
    // aFactor  output: filled with the detected factor values
    // Returns KErrNone; leaves with a demux error or KErrCorrupt if the
    // first frame cannot be read.

    // start reading video frames
    CMP4Demux *demux = (CMP4Demux *)iProcessor->GetDemux();
    TInt error = demux->ReadVideoFrames(1);

    if (error != KErrNone)
        User::Leave(error);

    // seek to and decode first frame
    if (!ReadFrame())
        User::Leave(KErrCorrupt);

    // Get pointer to the frame data: for H.263 the header is in the frame
    // itself, otherwise the stored decoder specific info is used
    TPtr8 inputPtr(0,0);
    if ( iDataFormat == EDataH263 )
        inputPtr.Set(iDataBuffer, iCurrentFrameLength + KH263StartCodeLength, iCurrentFrameLength + KH263StartCodeLength);
    else
        inputPtr.Set(iDecoderSpecificInfo->Des());

    if(iDataFormat == EDataAVC)
        {
        // @@ HARI AVC harcode for now
        // Set transcode factors
        aFactor.iTRes = 30;
        aFactor.iStreamType = EVedVideoBitstreamModeAVC;
        }
    else
        {
        // Get the VOL header from the frame data
        CVedVolReader* reader = CVedVolReader::NewL();
        CleanupStack::PushL(reader);
        reader->ParseVolHeaderL((TDesC8&) inputPtr);

        // Set transcode factors
        aFactor.iTRes = reader->TimeIncrementResolution();
        aFactor.iStreamType = reader->BitstreamMode();

        CleanupStack::PopAndDestroy(reader);
        }

    return KErrNone;
    }
|
3454 |
|
3455 |
|
3456 |
|
3457 // --------------------------------------------------------- |
|
3458 // CVideoProcessor::CheckFrameQuality |
|
3459 // Checks if a frame has "good" or "legible" quality |
|
3460 // (other items were commented in a header). |
|
3461 // --------------------------------------------------------- |
|
3462 // |
|
3463 TInt CVideoProcessor::CheckFrameQuality(TUint8* aYUVDataPtr) |
|
3464 { |
|
3465 TInt i; |
|
3466 TInt minValue = 255; |
|
3467 TInt maxValue = 0; |
|
3468 TInt goodFrame = 1; |
|
3469 TInt runningSum=0; |
|
3470 TInt averageValue=0; |
|
3471 TInt pixelSkips = 4; |
|
3472 TInt numberOfSamples=0; |
|
3473 TInt minMaxDeltaThreshold = 20; |
|
3474 TInt extremeRegionThreshold = 20; |
|
3475 TInt ySize = iVideoWidth*iVideoHeight; |
|
3476 |
|
3477 // gather image statistics |
|
3478 for(i=0, numberOfSamples=0; i<ySize; i+=pixelSkips, aYUVDataPtr+=pixelSkips, numberOfSamples++) |
|
3479 { |
|
3480 runningSum += *aYUVDataPtr; |
|
3481 if(*aYUVDataPtr > maxValue) |
|
3482 maxValue = *aYUVDataPtr; |
|
3483 if(*aYUVDataPtr < minValue) |
|
3484 minValue = *aYUVDataPtr; |
|
3485 } |
|
3486 VDASSERT(numberOfSamples,10); |
|
3487 averageValue = runningSum/numberOfSamples; |
|
3488 |
|
3489 // make decision based statistics |
|
3490 if((maxValue - minValue) < minMaxDeltaThreshold) |
|
3491 goodFrame = 0; |
|
3492 else |
|
3493 { |
|
3494 if(averageValue < (minValue + extremeRegionThreshold) || |
|
3495 averageValue > (maxValue - extremeRegionThreshold)) |
|
3496 goodFrame = 0; |
|
3497 } |
|
3498 return goodFrame; |
|
3499 } |
|
3500 |
|
3501 |
|
3502 // --------------------------------------------------------- |
|
3503 // CVideoProcessor::ReadFrame |
|
3504 // Reads a frame from input queue to internal buffer |
|
3505 // (other items were commented in a header). |
|
3506 // --------------------------------------------------------- |
|
3507 // |
|
TInt CVideoProcessor::ReadFrame()
    {
    // Reads one coded frame from the input queue into iDataBuffer. On the
    // first call the data format (H.263 / MPEG-4 / AVC) is detected from
    // the first four bytes of the stream; the actual frame copying is then
    // delegated to the format-specific reader.
    //
    // Returns ETrue if a frame was read, EFalse if no data is available
    // or the stream start is corrupt (the monitor is notified then).

    TUint doNow;

    // AVC clips are flagged by the processor; no start-code sniffing needed
    if (iProcessor->GetCurrentClipVideoType() == EVedVideoTypeAVCBaselineProfile)
        iDataFormat = EDataAVC;

    // Determine data format if needed
    if ( iDataFormat == EDataUnknown )
        {
        // We'll need four bytes of data
        while ( iDataLength < 4 )
            {
            // Get a block (we can't have one as we go here only at the stream
            // start)
            VDASSERT(!iBlock,11);
            while ( (!iBlock) || (iBlock->Length() == 0) )
                {
                if ( iBlock )
                    iQueue->ReturnBlock(iBlock);
                if ( (iBlock = iQueue->ReadBlock()) == NULL )
                    return EFalse;
                }
            iBlockPos = 0;

            // get timestamp for first frame
            if ( iTiming == ETimeStamp )
                {
                VDASSERT( (TUint)iBlock->Length() >= 8,12 );
                // skip the 4-byte timestamp prefix
                iBlockPos += 4;
                }

            // Copy data from block to buffer:
            doNow = 4 - iDataLength;
            if ( doNow > (TUint) iBlock->Length() - iBlockPos )
                doNow = iBlock->Length() - iBlockPos;
            Mem::Copy(iDataBuffer+iDataLength, iBlock->Ptr()+iBlockPos, doNow);
            iDataLength += doNow;
            iBlockPos += doNow;

            // Return the block if it doesn't have any more data
            if ( ((TInt)iBlockPos == iBlock->Length()) )
                {
                iQueue->ReturnBlock(iBlock);
                iBlock = 0;
                }
            }

        // OK, we have 4 bytes of data. Check if the buffer starts with a
        // H.263 PSC:
        if ( (iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) &&
             ((iDataBuffer[2] & 0xfc) == 0x80) )
            {
            // Yes, this is a H.263 stream
            iDataFormat = EDataH263;
            }

        // It should be MPEG-4, check if it starts with MPEG 4 Visual
        // Object Sequence start code, Visual Object start code, Video
        // Object start code, or Video Object Layer start code
        else if ( ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb0)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb6)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb3)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb5)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && ((iDataBuffer[3] >> 5) == 0)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && ((iDataBuffer[3] >> 4) == 2)))
            {
            iDataFormat = EDataMPEG4;
            }
        else
            {
            PRINT((_L("CVideoProcessor::ReadFrame() - no PSC or MPEG-4 start code in the start of the buffer")));
            if (iMonitor)
                iMonitor->Error(KErrCorrupt);
            return EFalse;
            }
        }

    // Determine the start code length
    TUint startCodeLength = 0;
    switch (iDataFormat)
        {
        case EDataH263:
            startCodeLength = KH263StartCodeLength;
            break;
        case EDataMPEG4:
            startCodeLength = KMPEG4StartCodeLength ;
            break;
        case EDataAVC:
            // no fixed-length start code used for AVC here
            break;

        default:
            User::Panic(_L("CVideoPlayer"), EInternalAssertionFailure);
        }

    // If the stream has ended, we have no blocks and no data for even a
    // picture start code, we can't get a frame
    // (note the H.263 branch uses <= for iDataLength, the others use <)
    if( iDataFormat == EDataH263 )
        {
        if ( iStreamEnd && (iQueue->NumDataBlocks() == 0) &&
             (iCurrentFrameLength <= startCodeLength) && (iDataLength <= startCodeLength) )
            return EFalse;
        }
    else
        {
        if ( iStreamEnd && (iQueue->NumDataBlocks() == 0) &&
             (iCurrentFrameLength <= startCodeLength) && (iDataLength < startCodeLength) )
            return EFalse;
        }

    // delegate to the format-specific frame reader
    switch(iDataFormat)
        {
        case EDataH263:
            return ReadH263Frame();
            // break;

        case EDataMPEG4:
            return ReadMPEG4Frame();
            // break;

        case EDataAVC:
            return ReadAVCFrame();
            // break;

        default:
            User::Panic(_L("CVideoProcessor"), EInternalAssertionFailure);

        }

    return ETrue;
    }
|
3640 |
|
3641 |
|
3642 // --------------------------------------------------------- |
|
3643 // CVideoProcessor::ReadH263Frame |
|
3644 // Reads a H.263 frame from input queue to internal buffer |
|
3645 // (other items were commented in a header). |
|
3646 // --------------------------------------------------------- |
|
3647 // |
|
// Reads one complete H.263 frame from the input queue into iDataBuffer.
// Returns ETrue when iDataBuffer contains a complete frame (iCurrentFrameLength
// and iDataLength updated), EFalse when more input is needed, the stream has
// ended with no usable data, or an error was reported through iMonitor.
TBool CVideoProcessor::ReadH263Frame()
    {

    VDASSERT( iDataFormat == EDataH263, 17 );

    // With timestamp-based timing each input block is prefixed by a
    // 4-byte timestamp which is skipped and not copied to iDataBuffer.
    TInt offset = 0;
    if ( iTiming == ETimeStamp )
        offset = 4;

    // There should be one PSC at the buffer start,
    // and no other PSCs up to iDataLength.
    // H.263 PSC bit pattern: 0x00 0x00, then top 6 bits == 100000
    // (checked as (byte & 0xfc) == 0x80).
    if ( (iDataLength >= KH263StartCodeLength) &&
        ((iDataBuffer[0] != 0) || (iDataBuffer[1] != 0) || ((iDataBuffer[2] & 0xfc) != 0x80)) )
        {
        PRINT((_L("CVideoProcessor::ReadH263Frame() - no PSC in the start of the buffer")))
        if (iMonitor)
            iMonitor->Error(KErrCorrupt);
        return EFalse;
        }

    // Never report a frame shorter than a start code
    if (iCurrentFrameLength < KH263StartCodeLength )
        iCurrentFrameLength = KH263StartCodeLength;

    // Accumulate input blocks until the next PSC is found; the data between
    // the buffered PSC and the next PSC constitutes one complete frame.
    TBool gotPSC = EFalse;
    while (!gotPSC)
        {
        // If we don't have a block at the moment, get one and check if it
        // has a new PSC
        while (!iBlock)
            {
            if ((iBlock = iQueue->ReadBlock()) == NULL)
                {
                // No block available: if the stream is still running (and no
                // thumbnail is being generated), wait for more input.
                if (!iStreamEnd && !iProcessor->IsThumbnailInProgress())
                    return EFalse;

                // No more blocks in the stream. If we have more data than
                // just a PSC, use the remaining as the last frame. We'll
                // append an End Of Stream (EOS) codeword to the stream end
                // to keep the decoder happy
                if (iDataLength <= 3)
                    return EFalse;
                iCurrentFrameLength = iDataLength;
                // Grow the buffer if the 3-byte EOS codeword does not fit
                if (iBufferLength < (iDataLength+3))
                    {
                    iBufferLength += 3;
                    TUint8* tmp = (TUint8*) User::ReAlloc(iDataBuffer, iBufferLength);
                    if ( !tmp )
                        {
                        if (iMonitor)
                            iMonitor->Error(KErrNoMemory);
                        return EFalse;
                        }
                    iDataBuffer = tmp;
                    }
                // Append the EOS codeword (22 one-bits => 00 00 fc)
                iDataBuffer[iCurrentFrameLength] = 0;
                iDataBuffer[iCurrentFrameLength+1] = 0;
                iDataBuffer[iCurrentFrameLength+2] = 0xfc;
                iDataLength += 3;
                return ETrue;
                }

            iBlockPos = 0;
            // Return empty blocks immediately
            if ( iBlock->Length() == 0 )
                {
                iQueue->ReturnBlock(iBlock);
                iBlock = 0;
                }
            }

        // If we are at the start of a block, check if it begins with a PSC
        if ( iBlockPos == 0 )
            {
            if ( (iBlock->Length() > 2 + offset) &&
                ( ((*iBlock)[0+offset] == 0) && ((*iBlock)[1+offset] == 0) && (((*iBlock)[2+offset] & 0xfc) == 0x80) ) )
                {
                gotPSC = ETrue;
                iCurrentFrameLength = iDataLength; // timestamps not copied to buffer

                if (iTiming == ETimeStamp)
                    {
                    // skip the 4-byte timestamp prefix
                    iBlockPos += offset;
                    }
                }
            else
                {
                // Every block is expected to start with a PSC; anything
                // else means the input stream is corrupt.
                PRINT((_L("CVideoProcessor::ReadH263Frame() - no PSC in the start of the buffer")))
                if (iMonitor)
                    iMonitor->Error( KErrCorrupt );
                return EFalse;
                }
            }

        // If we still have data in our current block, copy it to the buffer
        // Make sure we have enough space
        TUint copyBytes = iBlock->Length() - iBlockPos;
        if (copyBytes)
            {
            while (iBufferLength < (iDataLength + copyBytes))
                {
                // New size is 3/2ths of the old size, rounded up to the next
                // full kilobyte
                TUint newSize = (3 * iBufferLength) / 2;
                newSize = (newSize + 1023) & (~1023);

                TUint8* tmp = (TUint8*) User::ReAlloc(iDataBuffer, newSize);
                if (!tmp)
                    {
                    if (iMonitor)
                        iMonitor->Error(KErrNoMemory);
                    return EFalse;
                    }
                iDataBuffer = tmp;
                iBufferLength = newSize;
                }
            Mem::Copy(&iDataBuffer[iDataLength], iBlock->Ptr() + iBlockPos,
                      copyBytes);
            iBlockPos += copyBytes;
            iDataLength += copyBytes;
            }

        // OK, block used, throw it away
        VDASSERT(iBlock->Length() == (TInt)iBlockPos,16);
        iQueue->ReturnBlock(iBlock);
        iBlock = 0;
        }

    return ETrue;
    }
|
3777 |
|
3778 |
|
3779 |
|
3780 // --------------------------------------------------------- |
|
3781 // CVideoProcessor::ReadMPEG4Frame |
|
3782 // Reads a MPEG-4 frame from input queue to internal buffer |
|
3783 // (other items were commented in a header). |
|
3784 // --------------------------------------------------------- |
|
3785 // |
|
3786 TBool CVideoProcessor::ReadMPEG4Frame() |
|
3787 { |
|
3788 |
|
3789 VDASSERT( (iDataFormat == EDataMPEG4 && iTiming == ETimeStamp), 17 ); |
|
3790 |
|
3791 // The following code assumes that there is one complete video frame |
|
3792 // in each input block. This is true for 3GP input streams. |
|
3793 |
|
3794 // get a new block if we don't have one |
|
3795 while (!iBlock) |
|
3796 { |
|
3797 if ((iBlock = iQueue->ReadBlock()) == NULL) |
|
3798 return EFalse; |
|
3799 |
|
3800 iBlockPos = 0; |
|
3801 // Return empty blocks immediately |
|
3802 if (iBlock->Length() == 0) |
|
3803 { |
|
3804 iQueue->ReturnBlock(iBlock); |
|
3805 iBlock = 0; |
|
3806 } |
|
3807 } |
|
3808 |
|
3809 // If we are at the start of a block, save timestamp |
|
3810 if (iBlockPos == 0) |
|
3811 { |
|
3812 //TUint* p = (TUint*)iBlock->Ptr(); |
|
3813 //AI: iRenderFrameTime = TInt64( (TUint)((*p)*1000) ); |
|
3814 iBlockPos += 4; // skip timestamp |
|
3815 } |
|
3816 |
|
3817 if (iFirstRead) |
|
3818 { |
|
3819 // allocate buffer for header |
|
3820 VDASSERT(!iDecoderSpecificInfo, 160); |
|
3821 iDecoderSpecificInfo = (HBufC8*) HBufC8::New(iProcessor->GetDecoderSpecificInfoSize()); |
|
3822 if (!iDecoderSpecificInfo) |
|
3823 { |
|
3824 iMonitor->Error(KErrNoMemory); |
|
3825 return EFalse; |
|
3826 } |
|
3827 |
|
3828 TPtr8 ptr(iDecoderSpecificInfo->Des()); |
|
3829 |
|
3830 // first copy already read bytes from iDataBuffer |
|
3831 ptr.Copy(iDataBuffer, iDataLength); |
|
3832 |
|
3833 TInt copyNow = iProcessor->GetDecoderSpecificInfoSize() - iDataLength; |
|
3834 iDataLength = 0; |
|
3835 |
|
3836 // then copy the rest from input buffer |
|
3837 ptr.Append(iBlock->Ptr() + iBlockPos, copyNow); |
|
3838 iBlockPos += copyNow; |
|
3839 iDecoderSpecificInfoSent = EFalse; |
|
3840 |
|
3841 iFirstRead = EFalse; |
|
3842 |
|
3843 } |
|
3844 |
|
3845 TUint copyBytes = iBlock->Length() - iBlockPos; |
|
3846 if (copyBytes) |
|
3847 { |
|
3848 // Make sure we have enough space |
|
3849 // +4 is for inserting a start code at the end of the frame |
|
3850 while (iBufferLength < (iDataLength + copyBytes + 4)) |
|
3851 { |
|
3852 // New size is 3/2ths of the old size, rounded up to the next |
|
3853 // full kilobyte |
|
3854 TUint newSize = (3 * iBufferLength) / 2; |
|
3855 newSize = (newSize + 1023) & (~1023); |
|
3856 TUint8* tmp = (TUint8*) User::ReAlloc(iDataBuffer, newSize); |
|
3857 if (!tmp) |
|
3858 { |
|
3859 if (iMonitor) |
|
3860 iMonitor->Error(KErrNoMemory); |
|
3861 return EFalse; |
|
3862 } |
|
3863 iDataBuffer = tmp; |
|
3864 iBufferLength = newSize; |
|
3865 } |
|
3866 Mem::Copy(&iDataBuffer[iDataLength], iBlock->Ptr() + iBlockPos, |
|
3867 copyBytes); |
|
3868 iBlockPos += copyBytes; |
|
3869 iDataLength += copyBytes; |
|
3870 } |
|
3871 |
|
3872 // OK, block used, throw it away |
|
3873 VDASSERT((iBlock->Length() == (TInt)iBlockPos),18); |
|
3874 iQueue->ReturnBlock(iBlock); |
|
3875 iBlock = 0; |
|
3876 |
|
3877 // check for VOS end code |
|
3878 if ( (iDataBuffer[0] == 0 ) && (iDataBuffer[1] == 0 ) && |
|
3879 (iDataBuffer[2] == 0x01) && (iDataBuffer[3] == 0xb1) ) |
|
3880 return EFalse; |
|
3881 |
|
3882 // insert VOP start code at the end, the decoder needs it |
|
3883 iDataBuffer[iDataLength++] = 0; |
|
3884 iDataBuffer[iDataLength++] = 0; |
|
3885 iDataBuffer[iDataLength++] = 0x01; |
|
3886 iDataBuffer[iDataLength++] = 0xb6; |
|
3887 iCurrentFrameLength = iDataLength; |
|
3888 |
|
3889 // we have a complete frame |
|
3890 return ETrue; |
|
3891 |
|
3892 } |
|
3893 |
|
3894 |
|
3895 // --------------------------------------------------------- |
|
3896 // CVideoProcessor::ReadAVCFrame |
|
3897 // Reads an AVC frame from input queue to internal buffer |
|
3898 // (other items were commented in a header). |
|
3899 // --------------------------------------------------------- |
|
3900 // |
|
3901 TBool CVideoProcessor::ReadAVCFrame() |
|
3902 { |
|
3903 |
|
3904 VDASSERT( iDataFormat == EDataAVC, 17 ); |
|
3905 |
|
3906 // get a new block if we don't have one |
|
3907 while (!iBlock) |
|
3908 { |
|
3909 if ((iBlock = iQueue->ReadBlock()) == NULL) |
|
3910 return EFalse; |
|
3911 |
|
3912 iBlockPos = 0; |
|
3913 // Return empty blocks immediately |
|
3914 if (iBlock->Length() == 0) |
|
3915 { |
|
3916 iQueue->ReturnBlock(iBlock); |
|
3917 iBlock = 0; |
|
3918 } |
|
3919 } |
|
3920 |
|
3921 // skip 4 bytes for the timestamp |
|
3922 TInt skip = 4; |
|
3923 // TInt numSPS = 0; |
|
3924 // TInt numPPS = 0; |
|
3925 |
|
3926 // set this to point to the start of frame length field |
|
3927 TUint8* frameLenPtr = const_cast<TUint8*>(iBlock->Ptr()) + skip; |
|
3928 // how much space needed for frame data |
|
3929 TInt frameLen = 0; |
|
3930 |
|
3931 TInt totalFrameLen = iBlock->Length() - skip; |
|
3932 |
|
3933 if (iFirstRead) |
|
3934 { |
|
3935 TInt index = skip + 4; // Skip timestamp + version etc. |
|
3936 TUint8* temp = const_cast<TUint8*>(iBlock->Ptr()); |
|
3937 |
|
3938 // get no. bytes used for length |
|
3939 iFrameLengthBytes = ( temp[index] & 0x3 ) + 1; |
|
3940 |
|
3941 // save DCR |
|
3942 VDASSERT(!iDecoderSpecificInfo, 160); |
|
3943 |
|
3944 iDecoderSpecificInfo = (HBufC8*) HBufC8::New(iProcessor->GetDecoderSpecificInfoSize()); |
|
3945 if (!iDecoderSpecificInfo) |
|
3946 { |
|
3947 iMonitor->Error(KErrNoMemory); |
|
3948 return EFalse; |
|
3949 } |
|
3950 |
|
3951 TPtr8 ptr(iDecoderSpecificInfo->Des()); |
|
3952 ptr.Copy(iBlock->Ptr() + skip, iProcessor->GetDecoderSpecificInfoSize()); |
|
3953 iDecoderSpecificInfoSent = EFalse; |
|
3954 |
|
3955 // advance pointer over info to point to length field |
|
3956 frameLenPtr += iProcessor->GetDecoderSpecificInfoSize(); |
|
3957 |
|
3958 // add to frame len. since it is used to calculate the minimum buffer size |
|
3959 //frameLen += iProcessor->GetDecoderSpecificInfoSize(); |
|
3960 |
|
3961 totalFrameLen -= iProcessor->GetDecoderSpecificInfoSize(); |
|
3962 skip += iProcessor->GetDecoderSpecificInfoSize(); |
|
3963 |
|
3964 iFirstRead = EFalse; |
|
3965 } |
|
3966 |
|
3967 |
|
3968 |
|
3969 TInt numSliceNalUnits = 0; |
|
3970 while (frameLen < totalFrameLen) |
|
3971 { |
|
3972 TInt nalLen = 0; |
|
3973 switch (iFrameLengthBytes) |
|
3974 { |
|
3975 case 1: |
|
3976 nalLen = frameLenPtr[0] + 1; // +1 for length field |
|
3977 break; |
|
3978 |
|
3979 case 2: |
|
3980 nalLen = (frameLenPtr[0] << 8) + frameLenPtr[1] + 2; // +2 for length field |
|
3981 break; |
|
3982 |
|
3983 case 3: |
|
3984 nalLen = (frameLenPtr[0] << 16) + (frameLenPtr[1] << 8) + |
|
3985 frameLenPtr[2] + 3; // +3 for length field |
|
3986 break; |
|
3987 |
|
3988 case 4: |
|
3989 nalLen = (frameLenPtr[0] << 24) + (frameLenPtr[1] << 16) + |
|
3990 (frameLenPtr[2] << 8) + frameLenPtr[3] + 4; // +4 for length field |
|
3991 break; |
|
3992 |
|
3993 default: |
|
3994 if (iMonitor) |
|
3995 iMonitor->Error(KErrCorrupt); |
|
3996 return EFalse; |
|
3997 } |
|
3998 frameLenPtr += nalLen; |
|
3999 frameLen += nalLen; |
|
4000 numSliceNalUnits++; |
|
4001 } |
|
4002 |
|
4003 if ( iFrameLengthBytes != 4 ) |
|
4004 frameLen += numSliceNalUnits * ( (iFrameLengthBytes == 1) ? 3 : 2 ); |
|
4005 |
|
4006 // reserve space for alignment |
|
4007 TInt addBytes = (frameLen % 4 != 0) * ( 4 - (frameLen % 4) ); |
|
4008 |
|
4009 // reserve space for slice NAL unit offset and size fields |
|
4010 addBytes += (numSliceNalUnits * 8); |
|
4011 |
|
4012 // reserve space for number of NAL units (4) |
|
4013 addBytes += 4; |
|
4014 |
|
4015 // Make sure we have enough space |
|
4016 while (iBufferLength < (iDataLength + frameLen + addBytes)) |
|
4017 { |
|
4018 // New size is 3/2ths of the old size, rounded up to the next |
|
4019 // full kilobyte |
|
4020 TUint newSize = (3 * iBufferLength) / 2; |
|
4021 newSize = (newSize + 1023) & (~1023); |
|
4022 TUint8* tmp = (TUint8*) User::ReAlloc(iDataBuffer, newSize); |
|
4023 if (!tmp) |
|
4024 { |
|
4025 iMonitor->Error(KErrNoMemory); |
|
4026 return EFalse; |
|
4027 } |
|
4028 |
|
4029 iDataBuffer = tmp; |
|
4030 iBufferLength = newSize; |
|
4031 } |
|
4032 |
|
4033 iBlockPos += skip; |
|
4034 |
|
4035 if (iFrameLengthBytes == 4) |
|
4036 { |
|
4037 // just copy directly, no need to change length field |
|
4038 Mem::Copy(&iDataBuffer[iDataLength], iBlock->Ptr() + skip, frameLen); |
|
4039 iDataLength += frameLen; |
|
4040 iBlockPos += frameLen; |
|
4041 } |
|
4042 else |
|
4043 { |
|
4044 // have to change length field for each NAL |
|
4045 TUint8* srcPtr = const_cast<TUint8*>(iBlock->Ptr()) + skip; |
|
4046 while (numSliceNalUnits--) |
|
4047 { |
|
4048 // read length |
|
4049 TInt nalLen = 0; |
|
4050 switch (iFrameLengthBytes) |
|
4051 { |
|
4052 case 1: |
|
4053 nalLen = srcPtr[0]; |
|
4054 srcPtr += 1; // skip length field |
|
4055 iBlockPos += 1; |
|
4056 break; |
|
4057 |
|
4058 case 2: |
|
4059 nalLen = (srcPtr[0] << 8) + srcPtr[1]; |
|
4060 srcPtr += 2; // skip length field |
|
4061 iBlockPos += 2; |
|
4062 break; |
|
4063 |
|
4064 case 3: |
|
4065 nalLen = (srcPtr[0] << 16) + (srcPtr[1] << 8) + srcPtr[2]; |
|
4066 srcPtr += 3; // skip length field |
|
4067 iBlockPos += 3; |
|
4068 break; |
|
4069 |
|
4070 default: |
|
4071 if (iMonitor) |
|
4072 iMonitor->Error(KErrCorrupt); |
|
4073 return EFalse; |
|
4074 } |
|
4075 |
|
4076 // code length with 4 bytes |
|
4077 iDataBuffer[iDataLength] = TUint8((nalLen >> 24) & 0xff); |
|
4078 iDataBuffer[iDataLength + 1] = TUint8((nalLen >> 16) & 0xff); |
|
4079 iDataBuffer[iDataLength + 2] = TUint8((nalLen >> 8) & 0xff); |
|
4080 iDataBuffer[iDataLength + 3] = TUint8(nalLen & 0xff); |
|
4081 iDataLength += 4; |
|
4082 // copy NAL data |
|
4083 Mem::Copy(&iDataBuffer[iDataLength], srcPtr, nalLen); |
|
4084 iDataLength += nalLen; |
|
4085 srcPtr += nalLen; |
|
4086 iBlockPos += nalLen; |
|
4087 } |
|
4088 } |
|
4089 |
|
4090 // OK, block used, throw it away |
|
4091 VDASSERT((iBlock->Length() == (TInt)iBlockPos),18); |
|
4092 iQueue->ReturnBlock(iBlock); |
|
4093 iBlock = 0; |
|
4094 |
|
4095 iCurrentFrameLength = iDataLength; |
|
4096 |
|
4097 // we have a complete frame |
|
4098 return ETrue; |
|
4099 |
|
4100 } |
|
4101 |
|
4102 |
|
4103 |
|
4104 // --------------------------------------------------------- |
|
4105 // CVideoProcessor::DetermineClipTransitionParameters |
|
4106 // Sets transition frame parameters |
|
4107 // (other items were commented in a header). |
|
4108 // --------------------------------------------------------- |
|
4109 // |
|
4110 TInt CVideoProcessor::DetermineClipTransitionParameters(TInt& aTransitionEffect, |
|
4111 TInt& aStartOfClipTransition, |
|
4112 TInt& aEndOfClipTransition, |
|
4113 TTransitionColor& aStartTransitionColor, |
|
4114 TTransitionColor& aEndTransitionColor) |
|
4115 { |
|
4116 TInt error=KErrNone; |
|
4117 // find if transition effect is to be applied |
|
4118 TInt numberOfVideoClips = iProcessor->GetNumberOfVideoClips(); |
|
4119 TInt videoClipNumber = iProcessor->GetVideoClipNumber(); |
|
4120 TVedStartTransitionEffect startTransitionEffect = iProcessor->GetStartTransitionEffect(); |
|
4121 TVedEndTransitionEffect endTransitionEffect = iProcessor->GetEndTransitionEffect(); |
|
4122 TVedMiddleTransitionEffect middleTransitionEffect = iProcessor->GetMiddleTransitionEffect(); |
|
4123 TVedMiddleTransitionEffect previousMiddleTransitionEffect = iProcessor->GetPreviousMiddleTransitionEffect(); |
|
4124 |
|
4125 // is transition effect to be applied anywhere in the movie? |
|
4126 if(startTransitionEffect==EVedStartTransitionEffectNone && |
|
4127 middleTransitionEffect==EVedMiddleTransitionEffectNone && |
|
4128 endTransitionEffect==EVedEndTransitionEffectNone && |
|
4129 previousMiddleTransitionEffect==EVedMiddleTransitionEffectNone) |
|
4130 aTransitionEffect=0; |
|
4131 else |
|
4132 aTransitionEffect=1; |
|
4133 // where is the transition effect to be applied - beginning, end or both? |
|
4134 if(aTransitionEffect) |
|
4135 { |
|
4136 // if first video clip |
|
4137 if(videoClipNumber==0) |
|
4138 { |
|
4139 switch(startTransitionEffect) |
|
4140 { |
|
4141 default: |
|
4142 case EVedStartTransitionEffectNone: |
|
4143 case EVedStartTransitionEffectLast: |
|
4144 aStartOfClipTransition=0; |
|
4145 aStartTransitionColor = EColorNone; |
|
4146 break; |
|
4147 case EVedStartTransitionEffectFadeFromBlack: |
|
4148 aStartOfClipTransition=1; |
|
4149 aStartTransitionColor = EColorBlack; |
|
4150 break; |
|
4151 case EVedStartTransitionEffectFadeFromWhite: |
|
4152 aStartOfClipTransition=1; |
|
4153 aStartTransitionColor = EColorWhite; |
|
4154 break; |
|
4155 } |
|
4156 // do we need transition at the end of this clip? |
|
4157 if(videoClipNumber==numberOfVideoClips-1) // last clip? |
|
4158 { |
|
4159 switch(endTransitionEffect) |
|
4160 { |
|
4161 default: |
|
4162 case EVedEndTransitionEffectNone: |
|
4163 case EVedEndTransitionEffectLast: |
|
4164 aEndOfClipTransition=0; |
|
4165 aEndTransitionColor = EColorNone; |
|
4166 break; |
|
4167 case EVedEndTransitionEffectFadeToBlack: |
|
4168 aEndOfClipTransition=1; |
|
4169 aEndTransitionColor = EColorBlack; |
|
4170 break; |
|
4171 case EVedEndTransitionEffectFadeToWhite: |
|
4172 aEndOfClipTransition=1; |
|
4173 aEndTransitionColor = EColorWhite; |
|
4174 break; |
|
4175 } |
|
4176 } |
|
4177 else // middle clip |
|
4178 { |
|
4179 switch(middleTransitionEffect) |
|
4180 { |
|
4181 default: |
|
4182 case EVedMiddleTransitionEffectNone: |
|
4183 case EVedMiddleTransitionEffectLast: |
|
4184 aEndOfClipTransition=0; |
|
4185 aEndTransitionColor = EColorNone; |
|
4186 break; |
|
4187 case EVedMiddleTransitionEffectDipToBlack: |
|
4188 aEndOfClipTransition=1; |
|
4189 aEndTransitionColor = EColorBlack; |
|
4190 break; |
|
4191 case EVedMiddleTransitionEffectDipToWhite: |
|
4192 aEndOfClipTransition=1; |
|
4193 aEndTransitionColor = EColorWhite; |
|
4194 break; |
|
4195 //change |
|
4196 case EVedMiddleTransitionEffectCrossfade: |
|
4197 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectCrossfade); |
|
4198 aEndTransitionColor = EColorTransition; |
|
4199 break; |
|
4200 case EVedMiddleTransitionEffectWipeLeftToRight: |
|
4201 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeLeftToRight); |
|
4202 aEndTransitionColor = EColorTransition; |
|
4203 break; |
|
4204 case EVedMiddleTransitionEffectWipeRightToLeft: |
|
4205 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeRightToLeft); |
|
4206 aEndTransitionColor = EColorTransition; |
|
4207 break; |
|
4208 case EVedMiddleTransitionEffectWipeTopToBottom: |
|
4209 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeTopToBottom); |
|
4210 aEndTransitionColor = EColorTransition; |
|
4211 break; |
|
4212 case EVedMiddleTransitionEffectWipeBottomToTop: |
|
4213 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeBottomToTop); |
|
4214 aEndTransitionColor = EColorTransition; |
|
4215 break; |
|
4216 } |
|
4217 } |
|
4218 } |
|
4219 // else its the middle or last clip |
|
4220 else |
|
4221 { |
|
4222 // do we need transition at the beginning of this clip? |
|
4223 switch(previousMiddleTransitionEffect) |
|
4224 { |
|
4225 default: |
|
4226 case EVedMiddleTransitionEffectNone: |
|
4227 case EVedMiddleTransitionEffectLast: |
|
4228 aStartOfClipTransition=0; |
|
4229 aStartTransitionColor = EColorNone; |
|
4230 break; |
|
4231 case EVedMiddleTransitionEffectDipToBlack: |
|
4232 aStartOfClipTransition=1; |
|
4233 aStartTransitionColor = EColorBlack; |
|
4234 break; |
|
4235 case EVedMiddleTransitionEffectDipToWhite: |
|
4236 aStartOfClipTransition=1; |
|
4237 aStartTransitionColor = EColorWhite; |
|
4238 break; |
|
4239 |
|
4240 case EVedMiddleTransitionEffectCrossfade: |
|
4241 aStartOfClipTransition=TInt(EVedMiddleTransitionEffectCrossfade); |
|
4242 aStartTransitionColor = EColorTransition; |
|
4243 break; |
|
4244 case EVedMiddleTransitionEffectWipeLeftToRight: |
|
4245 aStartOfClipTransition=TInt(EVedMiddleTransitionEffectWipeLeftToRight); |
|
4246 aStartTransitionColor = EColorTransition; |
|
4247 break; |
|
4248 case EVedMiddleTransitionEffectWipeRightToLeft: |
|
4249 aStartOfClipTransition=TInt(EVedMiddleTransitionEffectWipeRightToLeft); |
|
4250 aStartTransitionColor = EColorTransition; |
|
4251 break; |
|
4252 case EVedMiddleTransitionEffectWipeTopToBottom: |
|
4253 aStartOfClipTransition=TInt(EVedMiddleTransitionEffectWipeTopToBottom); |
|
4254 aStartTransitionColor = EColorTransition; |
|
4255 break; |
|
4256 case EVedMiddleTransitionEffectWipeBottomToTop: |
|
4257 aStartOfClipTransition=TInt(EVedMiddleTransitionEffectWipeBottomToTop); |
|
4258 aStartTransitionColor = EColorTransition; |
|
4259 break; |
|
4260 } |
|
4261 // do we need transition at the end of this clip? |
|
4262 if(videoClipNumber==numberOfVideoClips-1) // last clip? |
|
4263 { |
|
4264 switch(endTransitionEffect) |
|
4265 { |
|
4266 default: |
|
4267 case EVedEndTransitionEffectNone: |
|
4268 case EVedEndTransitionEffectLast: |
|
4269 aEndOfClipTransition=0; |
|
4270 aEndTransitionColor = EColorNone; |
|
4271 break; |
|
4272 case EVedEndTransitionEffectFadeToBlack: |
|
4273 aEndOfClipTransition=1; |
|
4274 aEndTransitionColor = EColorBlack; |
|
4275 break; |
|
4276 case EVedEndTransitionEffectFadeToWhite: |
|
4277 aEndOfClipTransition=1; |
|
4278 aEndTransitionColor = EColorWhite; |
|
4279 break; |
|
4280 } |
|
4281 } |
|
4282 else // middle clip |
|
4283 { |
|
4284 switch(middleTransitionEffect) |
|
4285 { |
|
4286 default: |
|
4287 case EVedMiddleTransitionEffectNone: |
|
4288 case EVedMiddleTransitionEffectLast: |
|
4289 aEndOfClipTransition=0; |
|
4290 aEndTransitionColor = EColorNone; |
|
4291 break; |
|
4292 case EVedMiddleTransitionEffectDipToBlack: |
|
4293 aEndOfClipTransition=1; |
|
4294 aEndTransitionColor = EColorBlack; |
|
4295 break; |
|
4296 case EVedMiddleTransitionEffectDipToWhite: |
|
4297 aEndOfClipTransition=1; |
|
4298 aEndTransitionColor = EColorWhite; |
|
4299 break; |
|
4300 //change |
|
4301 case EVedMiddleTransitionEffectCrossfade: |
|
4302 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectCrossfade); |
|
4303 aEndTransitionColor = EColorTransition; |
|
4304 break; |
|
4305 case EVedMiddleTransitionEffectWipeLeftToRight: |
|
4306 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeLeftToRight); |
|
4307 aEndTransitionColor = EColorTransition; |
|
4308 break; |
|
4309 case EVedMiddleTransitionEffectWipeRightToLeft: |
|
4310 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeRightToLeft); |
|
4311 aEndTransitionColor = EColorTransition; |
|
4312 break; |
|
4313 case EVedMiddleTransitionEffectWipeTopToBottom: |
|
4314 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeTopToBottom); |
|
4315 aEndTransitionColor = EColorTransition; |
|
4316 break; |
|
4317 case EVedMiddleTransitionEffectWipeBottomToTop: |
|
4318 aEndOfClipTransition=TInt(EVedMiddleTransitionEffectWipeBottomToTop); |
|
4319 aEndTransitionColor = EColorTransition; |
|
4320 break; |
|
4321 } |
|
4322 } |
|
4323 } |
|
4324 } |
|
4325 return error; |
|
4326 } |
|
4327 |
|
4328 |
|
4329 // --------------------------------------------------------- |
|
4330 // CVideoProcessor::GetNumberOfTransitionFrames |
|
4331 // Calculate the number of transition frames |
|
4332 // (other items were commented in a header). |
|
4333 // --------------------------------------------------------- |
|
4334 // |
|
// Calculates how many frames take part in the start and end transitions of
// the current (possibly cut) clip, clamping the counts so they fit inside
// the number of frames actually included between the cut points. Updates
// iNumberOfIncludedFrames, iStartNumberOfTransitionFrames,
// iEndNumberOfTransitionFrames and iLastIntraFrameBeforeTransition.
// @param aStartCutTime  cut-in time of the clip
// @param aEndCutTime    cut-out time of the clip
void CVideoProcessor::GetNumberOfTransitionFrames(TTimeIntervalMicroSeconds aStartCutTime,
                                                  TTimeIntervalMicroSeconds aEndCutTime)
    {

    TInt startFrameIndex = iProcessor->GetVideoFrameIndex(aStartCutTime);
    // the following is because binary search gives us frame with timestamp < startCutTime
    // this frame would be out of range for movie
    if(startFrameIndex > 0 && startFrameIndex < iNumberOfFrames-1)
        startFrameIndex++;
    TInt endFrameIndex = iProcessor->GetVideoFrameIndex(aEndCutTime);
    // adjust frame indices for cut video clip
    startFrameIndex -= iProcessor->GetStartFrameIndex();
    endFrameIndex -= iProcessor->GetStartFrameIndex();
    // clamp indices so the range is valid and non-inverted
    if(startFrameIndex < 0)
        startFrameIndex = 0;
    if(endFrameIndex < 0)
        endFrameIndex = 0;
    if(endFrameIndex<startFrameIndex)
        endFrameIndex = startFrameIndex;

    // determine the total number of included frames in the clip
    iNumberOfIncludedFrames = endFrameIndex-startFrameIndex+1;

    // make sure there are enough frames to apply transition
    // for transition at both ends
    if(iStartOfClipTransition && iEndOfClipTransition)
        {

        // EColorTransition marks a cross-fade/wipe transition; such a
        // transition is shared between two clips, so this clip only
        // contributes half of the transition frames (hence the >>= 1).
        if ( iStartTransitionColor == EColorTransition &&
             iEndTransitionColor == EColorTransition )
            {
            iStartNumberOfTransitionFrames >>= 1;

            // if there are not enough frames saved from previous
            // clip, the transition must be shortened accordingly
            if (iProcessor->GetNumberOfSavedFrames() < iStartNumberOfTransitionFrames)
                iStartNumberOfTransitionFrames = iProcessor->GetNumberOfSavedFrames();

            iEndNumberOfTransitionFrames >>= 1;
            // if both transitions together don't fit, split the included
            // frames between them
            if ( iNumberOfIncludedFrames < (iStartNumberOfTransitionFrames + iEndNumberOfTransitionFrames) )
                {
                iStartNumberOfTransitionFrames = iNumberOfIncludedFrames >> 1;
                iEndNumberOfTransitionFrames = iNumberOfIncludedFrames - iStartNumberOfTransitionFrames;
                }
            }
        else
            {
            if ( iStartTransitionColor == EColorTransition )
                {
                iStartNumberOfTransitionFrames >>= 1;

                // if there are not enough frames saved from previous
                // clip, the transition must be shortened accordingly
                if (iProcessor->GetNumberOfSavedFrames() < iStartNumberOfTransitionFrames)
                    iStartNumberOfTransitionFrames = iProcessor->GetNumberOfSavedFrames();
                }

            if ( iEndTransitionColor == EColorTransition )
                iEndNumberOfTransitionFrames >>= 1;

            // not enough frames for both transitions: favour keeping the
            // cross-fade/wipe side at its size when possible, otherwise
            // split the included frames between start and end
            if ( iNumberOfIncludedFrames < (iStartNumberOfTransitionFrames + iEndNumberOfTransitionFrames) )
                {
                if ( iStartTransitionColor == EColorTransition )
                    {
                    if ( ( iNumberOfIncludedFrames >> 1 ) > iStartNumberOfTransitionFrames )
                        {
                        iEndNumberOfTransitionFrames = iNumberOfIncludedFrames - iStartNumberOfTransitionFrames;
                        }
                    else
                        {
                        iStartNumberOfTransitionFrames = iNumberOfIncludedFrames >> 1;
                        iEndNumberOfTransitionFrames = iNumberOfIncludedFrames - iStartNumberOfTransitionFrames;
                        }
                    }
                else if ( iEndTransitionColor == EColorTransition )
                    {
                    if ( ( iNumberOfIncludedFrames >> 1 ) > iEndNumberOfTransitionFrames )
                        {
                        iStartNumberOfTransitionFrames = iNumberOfIncludedFrames - iEndNumberOfTransitionFrames;
                        }
                    else
                        {
                        iStartNumberOfTransitionFrames = iNumberOfIncludedFrames >> 1;
                        iEndNumberOfTransitionFrames = iNumberOfIncludedFrames - iStartNumberOfTransitionFrames;
                        }
                    }
                else
                    {
                    iStartNumberOfTransitionFrames = iNumberOfIncludedFrames >> 1;
                    iEndNumberOfTransitionFrames = iNumberOfIncludedFrames - iStartNumberOfTransitionFrames;
                    }
                }
            }
        }
    // if transition is at one end only
    else
        {
        if ( iStartOfClipTransition )
            {
            iEndNumberOfTransitionFrames = 0;
            if ( iStartTransitionColor == EColorTransition )
                {
                iStartNumberOfTransitionFrames >>= 1;

                // if there are not enough frames saved from previous
                // clip, the transition must be shortened accordingly
                if (iProcessor->GetNumberOfSavedFrames() < iStartNumberOfTransitionFrames)
                    iStartNumberOfTransitionFrames = iProcessor->GetNumberOfSavedFrames();
                }

            if ( iNumberOfIncludedFrames < iStartNumberOfTransitionFrames )
                {
                iStartNumberOfTransitionFrames = iNumberOfIncludedFrames;
                }
            }
        else
            {
            iStartNumberOfTransitionFrames = 0;
            if ( iEndTransitionColor == EColorTransition )
                {
                iEndNumberOfTransitionFrames >>= 1;
                }
            if ( iNumberOfIncludedFrames < iEndNumberOfTransitionFrames )
                {
                iEndNumberOfTransitionFrames = iNumberOfIncludedFrames;
                }
            }
        }
    // fetch the last Intra before transition begins.
    // should be done after the cutting as well.

    iLastIntraFrameBeforeTransition=0;
    if(iNumberOfIncludedFrames > 2) //so that we could loop to find the last intra.
        {
        TInt i;
        TInt j=iProcessor->GetStartFrameIndex(); // processor needs frame index from beginning of clip
        // scan backwards from where the end transition starts to find the
        // last intra frame before the transition
        for(i=endFrameIndex-iEndNumberOfTransitionFrames; i>=startFrameIndex;i--)
            {
            if(iProcessor->GetVideoFrameType(i+j) == 1) // absolute index needed here!
                {
                iLastIntraFrameBeforeTransition=i;
                break;
                }
            }
        }

    }
|
4482 |
|
4483 // --------------------------------------------------------- |
|
4484 // CVideoProcessor::SetTransitionFrameParams |
|
4485 // Set parameters for a transition frame |
|
4486 // (other items were commented in a header). |
|
4487 // --------------------------------------------------------- |
|
4488 // |
|
// Decides for one included frame whether it belongs to a start-of-clip or
// end-of-clip transition and sets the related member state (iTransitionFrame,
// iTransitionPosition, iTransitionColor, iFirstTransitionFrame,
// iTransitionFrameNumber, iFirstFrameAfterTransition, iPreviousFrameIncluded).
// @param aIncludedFrameNumber  zero-based index of the frame among included frames
// @param aDecodeFrame          out: whether this frame must be decoded
void CVideoProcessor::SetTransitionFrameParams(TInt aIncludedFrameNumber, TBool& aDecodeFrame)
    {

    // if transition is to be applied at the beginning of the clip
    if(iStartOfClipTransition)
        {
        iFirstFrameAfterTransition = EFalse;
        // this is for start-of-clip transition
        if(aIncludedFrameNumber < iStartNumberOfTransitionFrames)
            {
            // if its first transition frame
            if(aIncludedFrameNumber == 0)
                {
                iFirstTransitionFrame = 1;
                iTransitionFrameNumber = 0;
                }
            else
                {
                iTransitionFrameNumber++;
                }

            // EColorTransition marks a cross-fade/wipe shared with the
            // previous clip; plain fades are handled in the else branch
            if ( iStartTransitionColor == EColorTransition )
                {
                // ignore this transition if the previous clip has less transition frames
                // than this clip's transition frames
                if ( iTransitionFrameNumber < ( iProcessor->NumberOfTransition() - iTransitionFrameNumber ) )
                    {
                    iTransitionFrame = 1;
                    iTransitionPosition = EPositionStartOfClip;
                    aDecodeFrame = ETrue;
                    iTransitionColor = iStartTransitionColor;
                    }
                else
                    {
                    iPreviousFrameIncluded = EFalse;
                    }
                }
            else
                {
                // plain fade: frame content is not needed, no decode required
                iTransitionFrame = 1;
                iTransitionPosition = EPositionStartOfClip;
                aDecodeFrame = EFalse;
                iTransitionColor = iStartTransitionColor;
                }

            }
        else
            {
            // if this is first frame after transition, we need to encode it as intra
            // treat/simulate it as if its the start of the cut point.
            if(aIncludedFrameNumber == iStartNumberOfTransitionFrames)
                {
                iFirstFrameAfterTransition = ETrue;
                iPreviousFrameIncluded = EFalse;
                }
            }
        }

    // if transition is to be applied at the end of the clip
    // (only if the frame was not already claimed by the start transition)
    if(iEndOfClipTransition && iTransitionFrame == 0)
        {
        // this is for end-of-clip transition
        if(aIncludedFrameNumber >= iNumberOfIncludedFrames - iEndNumberOfTransitionFrames)
            {
            // if its first transition frame
            if(aIncludedFrameNumber == iNumberOfIncludedFrames - iEndNumberOfTransitionFrames)
                {
                iFirstTransitionFrame = 1;
                iTransitionFrameNumber = 0;
                }
            else
                {
                iTransitionFrameNumber++;
                }

            if ( iEndTransitionColor == EColorTransition )
                {
                // get the next clip's start transition information
                GetNextClipTransitionInfo();
                // if next clip's start transition number is less than current clip's
                // end transition number, then DO NOT treat current frame as the
                // the transition frame
                if ( ( iEndNumberOfTransitionFrames - iTransitionFrameNumber ) <= iNextTransitionNumber )
                    {
                    iTransitionFrame = 1;
                    iTransitionPosition = EPositionEndOfClip;
                    aDecodeFrame = ETrue;
                    iTransitionColor = iEndTransitionColor;
                    }
                }
            else
                {
                // plain fade: frame content is not needed, no decode required
                iTransitionFrame = 1;
                iTransitionPosition = EPositionEndOfClip;
                aDecodeFrame = EFalse;
                iTransitionColor = iEndTransitionColor;
                }
            }
        else
            {
            // if this is first frame, we need to start decoding from here
            // treat/simulate it as if its the nearest preceding intra frame.
            if(iFrameNumber >= iLastIntraFrameBeforeTransition)
                aDecodeFrame = ETrue;

            // In AVC case, if there is also starting transition, decode
            // all frames after that since frame numbering must be consistent
            // for AVC decoding to work
            if (iDataFormat == EDataAVC && iStartOfClipTransition)
                aDecodeFrame = ETrue;

            } // end-of-clip transition
        }

    }
|
4604 |
|
4605 |
|
4606 |
|
// ---------------------------------------------------------
// CVideoProcessor::ApplyFadingTransitionEffect
// Applies fading transition effect for a YUV frame
// (other items were commented in a header).
// ---------------------------------------------------------
//
// Fades one YUV 4:2:0 planar frame towards solid black or white, in-place.
// The mix 'position' p (1..10) means p*10 % of the fade colour is blended
// with (10-p)*10 % of the original frame data.
//
// aYUVPtr                : frame data; Y plane followed by U and V planes
//                          (4:2:0 layout is established below: each chroma
//                          plane is yLength>>2 bytes)
// aTransitionPosition    : EPositionStartOfClip (fade in) or
//                          EPositionEndOfClip (fade out)
// aTransitionColor       : EColorWhite or EColorBlack fade target
// aTransitionFramenumber : 0-based index of this frame within the transition

void CVideoProcessor::ApplyFadingTransitionEffect(TUint8* aYUVPtr,
                                                  TTransitionPosition aTransitionPosition,
                                                  TTransitionColor aTransitionColor,
                                                  TInt aTransitionFramenumber)
{
    TInt i;

    TSize movieSize = iProcessor->GetMovieResolution();
    TInt yLength = movieSize.iWidth * movieSize.iHeight;

    // 4:2:0 planar: U and V planes are each a quarter of the Y plane
    TInt uLength = yLength>>2;
    TUint8* yFrame = (TUint8*)aYUVPtr;
    TUint8* uFrame = (TUint8*)(yFrame+yLength);
    TUint8* vFrame = (TUint8*)(uFrame+uLength);
    TInt position;
    TChar chr;
    TInt value;
    TPtr8 ptr(0,0);

    // look-up tables to avoid floating point operations due to fractional weighting;
    // quantTableK[x >> s] approximates x * (K/10) for a sample x quantized
    // down to the table size (8/16/32/64 entries)
    // corresponding to 0.1, 26 values quantized to 8
    const TUint8 quantTable1[8] = {
        4, 7, 10, 13, 16, 19, 22, 24 };
    // corresponding to 0.2, 52 values quantized to 16
    const TUint8 quantTable2[16] = {
        4, 8, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 50 };
    // corresponding to 0.3, 77 values quantized to 16
    const TUint8 quantTable3[16] = {
        5, 10, 15, 20, 24, 29, 33, 38, 42, 47, 51, 56, 61, 66, 71, 75 };
    // corresponding to 0.4, 103 values quantized to 32
    const TUint8 quantTable4[32] = {
        5, 10, 14, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54,
        57, 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 101 };
    // corresponding to 0.5, 128 values quantized to 32
    const TUint8 quantTable5[32] = {
        3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63,
        67, 71, 75, 79, 83, 87, 91, 95, 99, 103, 107, 111, 115, 119, 123, 126 };
    // corresponding to 0.6, 154 values quantized to 32
    const TUint8 quantTable6[32] = {
        5, 13, 20, 27, 32, 36, 41, 45, 50, 54, 59, 63, 68, 72, 77, 81,
        86, 90, 95, 99, 103, 108, 112, 117, 121, 126, 130, 135, 139, 144, 148, 152 };
    // corresponding to 0.7, 179 values quantized to 64
    const TUint8 quantTable7[64] = {
        5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50,
        53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 92, 95, 98,
        101, 104, 107, 110, 113, 116, 119, 122, 125, 128, 131, 134, 137, 140, 143, 146,
        149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 178 };
    // corresponding to 0.8, 204 values quantized to 64
    const TUint8 quantTable8[64] = {
        5, 10, 15, 20, 25, 29, 33, 36, 40, 42, 45, 48, 51, 54, 57, 60,
        63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 102, 105, 108,
        111, 114, 117, 120, 123, 126, 129, 132, 135, 138, 141, 144, 147, 150, 153, 156,
        159, 162, 165, 168, 171, 174, 177, 180, 183, 186, 189, 192, 195, 198, 201, 203 };
    // corresponding to 0.9, 230 values quantized to 64
    const TUint8 quantTable9[64] = {
        5, 10, 15, 19, 23, 26, 30, 33, 37, 40, 44, 47, 51, 54, 58, 61,
        65, 68, 72, 75, 79, 82, 86, 89, 93, 96, 100, 103, 107, 110, 114, 117,
        121, 124, 128, 131, 135, 138, 142, 145, 149, 152, 156, 159, 163, 166, 170, 173,
        177, 180, 184, 187, 191, 194, 198, 201, 205, 208, 212, 215, 219, 222, 226, 228 };
    // maps the 0-based table index to the 1-based mix position below
    const TUint8 indexTable[10]={1,2,3,4,5,6,7,8,9,10};

    // figure out if the transition is at the beginning or end of the clip;
    // fade-in starts at full fade colour and weakens, fade-out strengthens
    TInt index;
    switch(aTransitionPosition)
    {
        case EPositionStartOfClip: // start-of-clip transition
            if( (index = iStartNumberOfTransitionFrames - aTransitionFramenumber-1) < 0 ) index = 0;
            break;
        case EPositionEndOfClip: // end-of-clip transition
            if( (index = aTransitionFramenumber) >= iEndNumberOfTransitionFrames ) index = iEndNumberOfTransitionFrames - 1;
            break;
        default:
            index = 0;
            break;
    }
    position = indexTable[index];

    if(aTransitionColor==EColorWhite)
    {
        switch(position) // white
        {
            case 10: // 0% frame1, 100% frame2
                // Y: fill whole luma plane with near-white
                value = 254; chr = value;
                ptr.Set(yFrame, yLength, yLength);
                ptr.Fill(chr);
                // U,V: both chroma planes (uLength<<1 bytes) to neutral 128
                value = 128; chr = value;
                ptr.Set(uFrame, uLength<<1, uLength<<1);
                ptr.Fill(chr);
                break;
            case 9: // 10% frame1, 90% frame2
                value = quantTable9[63]; // 90% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable1[(*yFrame)>>5] + value);
                value = 113; // 90% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable1[(*uFrame)>>5] + value);
                    *vFrame = (TUint8)(quantTable1[(*vFrame)>>5] + value);
                }
                break;
            case 8: // 20% frame1, 80% frame2
                value = quantTable8[63]; // 80% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable2[(*yFrame)>>4] + value);
                value = 98; // 80% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable2[(*uFrame)>>4] + value);
                    *vFrame = (TUint8)(quantTable2[(*vFrame)>>4] + value);
                }
                break;
            case 7: // 30% frame1, 70% frame2
                value = quantTable7[63]; // 70% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable3[(*yFrame)>>4] + value);
                value = 86; // 70% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable3[(*uFrame)>>4] + value);
                    *vFrame = (TUint8)(quantTable3[(*vFrame)>>4] + value);
                }
                break;
            case 6: // 40% frame1, 60% frame2
                value = quantTable6[31]; // 60% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable4[(*yFrame)>>3] + value);
                value = 72; //77; // 60% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable4[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable4[(*vFrame)>>3] + value);
                }
                break;
            case 5: // 50% frame1, 50% frame2
                value = quantTable5[31]; // 50% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable5[(*yFrame)>>3] + value);
                value = 62; // 50% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable5[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable5[(*vFrame)>>3] + value);
                }
                break;
            case 4: // 60% frame1, 40% frame2
                value = quantTable4[31]; // 40% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable6[(*yFrame)>>3] + value);
                value = 44; //51; // 40% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable6[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable6[(*vFrame)>>3] + value);
                }
                break;
            case 3: // 70% frame1, 30% frame2
                value = quantTable3[15]; // 30% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable7[(*yFrame)>>2] + value);
                value = 28; //38; // 30% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable7[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable7[(*vFrame)>>2] + value);
                }
                break;
            case 2: // 80% frame1, 20% frame2
                value = quantTable2[15]; // 20% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable8[(*yFrame)>>2] + value);
                value = 18; //25; // 20% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable8[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable8[(*vFrame)>>2] + value);
                }
                break;
            case 1: // 90% frame1, 10% frame2
                value = quantTable1[7]; // 10% of 254 (white)
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable9[(*yFrame)>>2] + value);
                value = 8; //13; // 10% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable9[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable9[(*vFrame)>>2] + value);
                }
                break;
            default: // e.g., 100% frame1, 0% frame2
                break;
        }
    }
    else if(aTransitionColor==EColorBlack) // black
    {
        // Black fade: no luma constant is added (black luma ~= 0),
        // chroma is still pulled towards neutral 128.
        switch(position)
        {
            case 10: // 0% frame1, 100% frame2
                // Y
                value = 4; chr = value;
                ptr.Set(yFrame, yLength, yLength);
                ptr.Fill(chr);
                // U,V
                value = 128; chr = value;
                ptr.Set(uFrame, uLength<<1, uLength<<1);
                ptr.Fill(chr);
                break;
            case 9: // 10% frame1, 90% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable1[(*yFrame)>>5]);
                value = 113; // 90% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable1[(*uFrame)>>5] + value);
                    *vFrame = (TUint8)(quantTable1[(*vFrame)>>5] + value);
                }
                break;
            case 8: // 20% frame1, 80% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable2[(*yFrame)>>4]);
                value = 98; // 80% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable2[(*uFrame)>>4] + value);
                    *vFrame = (TUint8)(quantTable2[(*vFrame)>>4] + value);
                }
                break;
            case 7: // 30% frame1, 70% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable3[(*yFrame)>>4]);
                value = 86; // 70% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable3[(*uFrame)>>4] + value);
                    *vFrame = (TUint8)(quantTable3[(*vFrame)>>4] + value);
                }
                break;
            case 6: // 40% frame1, 60% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable4[(*yFrame)>>3]);
                value = 72; //77; // 60% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable4[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable4[(*vFrame)>>3] + value);
                }
                break;
            case 5: // 50% frame1, 50% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable5[(*yFrame)>>3]);
                value = 62; // 50% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable5[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable5[(*vFrame)>>3] + value);
                }
                break;
            case 4: // 60% frame1, 40% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable6[(*yFrame)>>3]);
                value = 44; //51; // 40% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable6[(*uFrame)>>3] + value);
                    *vFrame = (TUint8)(quantTable6[(*vFrame)>>3] + value);
                }
                break;
            case 3: // 70% frame1, 30% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable7[(*yFrame)>>2]);
                value = 28; //38; // 30% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable7[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable7[(*vFrame)>>2] + value);
                }
                break;
            case 2: // 80% frame1, 20% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable8[(*yFrame)>>2]);
                value = 18; //25; // 20% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable8[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable8[(*vFrame)>>2] + value);
                }
                break;
            case 1: // 90% frame1, 10% frame2
                for(i=0; i<yLength; i++, yFrame++) // Y
                    *yFrame = (TUint8)(quantTable9[(*yFrame)>>2]);
                value = 8; //13; // 10% of 128
                for(i=0; i<uLength; i++, uFrame++,vFrame++) // U,V
                {
                    *uFrame = (TUint8)(quantTable9[(*uFrame)>>2] + value);
                    *vFrame = (TUint8)(quantTable9[(*vFrame)>>2] + value);
                }
                break;
            default: // e.g., 100% frame1, 0% frame2
                break;
        }
    }
    return;
}
|
4918 |
|
// ---------------------------------------------------------
// CVideoProcessor::ApplyBlendingTransitionEffect
// Applies blending transition effect between two YUV frames
// (other items were commented in a header).
// ---------------------------------------------------------
//
// Cross-fades two YUV 4:2:0 planar frames in-place into aYUVPtr1.
// Mix 'position' p (1..10) means (10-p)*10 % of frame1 blended with
// p*10 % of frame2. aRepeatFrame (0/1) halves the step size so each
// transition frame yields two blend steps: index = frame*2 + repeat.
//
// aYUVPtr1               : outgoing clip's frame; receives the result
// aYUVPtr2               : incoming clip's frame (read-only here)
// aRepeatFrame           : 0 for first use of this frame pair, 1 for repeat
// aTransitionFramenumber : 0-based frame index within the transition
void CVideoProcessor::ApplyBlendingTransitionEffect(TUint8* aYUVPtr1,
                                                    TUint8* aYUVPtr2,
                                                    TInt aRepeatFrame,
                                                    TInt aTransitionFramenumber)
{
    TInt i;
    TSize tempSize = iProcessor->GetMovieResolution();

    TInt yLength = tempSize.iWidth*tempSize.iHeight;
    TInt uLength = yLength>>2;           // one chroma plane (4:2:0)
    TInt yuvLength = yLength + (yLength>>1); // whole frame: Y + U + V
    TUint8* yFrame1 = (TUint8*)aYUVPtr1;
    TUint8* uFrame1 = (TUint8*)(yFrame1+yLength);
    TUint8* vFrame1 = (TUint8*)(uFrame1+uLength);
    TUint8* yFrame2 = (TUint8*)aYUVPtr2;
    TUint8* uFrame2 = (TUint8*)(yFrame2+yLength);
    TUint8* vFrame2 = (TUint8*)(uFrame2+uLength);
    TInt position;
    TPtr8 ptr(0,0);
    const TInt numberOfTables = 10;

    // weighting look-up tables (integer-only fractional scaling):
    // quantTableK[x >> s] approximates x * (K/10)
    // corresponding to 0.1, 26 values quantized to 8
    const TUint8 quantTable1[8] = {
        4, 7, 10, 13, 16, 19, 22, 24 };
    // corresponding to 0.2, 52 values quantized to 16
    const TUint8 quantTable2[16] = {
        4, 8, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 50 };
    // corresponding to 0.3, 77 values quantized to 16
    const TUint8 quantTable3[16] = {
        5, 10, 15, 20, 24, 29, 33, 38, 42, 47, 51, 56, 61, 66, 71, 75 };
    // corresponding to 0.4, 103 values quantized to 32
    const TUint8 quantTable4[32] = {
        5, 10, 14, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54,
        57, 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 101 };
    // corresponding to 0.5, 128 values quantized to 32
    const TUint8 quantTable5[32] = {
        3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63,
        67, 71, 75, 79, 83, 87, 91, 95, 99, 103, 107, 111, 115, 119, 123, 126 };
    // corresponding to 0.6, 154 values quantized to 32
    const TUint8 quantTable6[32] = {
        5, 13, 20, 27, 32, 36, 41, 45, 50, 54, 59, 63, 68, 72, 77, 81,
        86, 90, 95, 99, 103, 108, 112, 117, 121, 126, 130, 135, 139, 144, 148, 152 };
    // corresponding to 0.7, 179 values quantized to 64
    const TUint8 quantTable7[64] = {
        5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50,
        53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 92, 95, 98,
        101, 104, 107, 110, 113, 116, 119, 122, 125, 128, 131, 134, 137, 140, 143, 146,
        149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 178 };
    // corresponding to 0.8, 204 values quantized to 64
    const TUint8 quantTable8[64] = {
        5, 10, 15, 20, 25, 29, 33, 36, 40, 42, 45, 48, 51, 54, 57, 60,
        63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 102, 105, 108,
        111, 114, 117, 120, 123, 126, 129, 132, 135, 138, 141, 144, 147, 150, 153, 156,
        159, 162, 165, 168, 171, 174, 177, 180, 183, 186, 189, 192, 195, 198, 201, 203 };
    // corresponding to 0.9, 230 values quantized to 64
    const TUint8 quantTable9[64] = {
        5, 10, 15, 19, 23, 26, 30, 33, 37, 40, 44, 47, 51, 54, 58, 61,
        65, 68, 72, 75, 79, 82, 86, 89, 93, 96, 100, 103, 107, 110, 114, 117,
        121, 124, 128, 131, 135, 138, 142, 145, 149, 152, 156, 159, 163, 166, 170, 173,
        177, 180, 184, 187, 191, 194, 198, 201, 205, 208, 212, 215, 219, 222, 226, 228 };

    // maps the 0-based index to the 1-based mix position below
    const TUint8 indexTable[10]={1,2,3,4,5,6,7,8,9,10};

    // figure out the position of the index (determines which table to use)
    TInt frameNumber = aTransitionFramenumber;
    if(frameNumber>=iStartNumberOfTransitionFrames) frameNumber=iStartNumberOfTransitionFrames-1;
    TInt index = (frameNumber<<1) + aRepeatFrame;
    if(index>=numberOfTables) index=numberOfTables-1;
    position = indexTable[index];

    // calculate new values; the small constants subtracted from the chroma
    // sums (e.g. -10, -15) presumably compensate for the tables' rounding
    // bias around the 128 neutral point -- TODO confirm against tuning notes
    switch(position)
    {
        case 10: // 0% frame1, 100% frame2
            // copy the whole frame2 (Y+U+V) over frame1 in one descriptor op
            ptr.Set(yFrame1,yuvLength,yuvLength);
            ptr.Copy(yFrame2,yuvLength);
            break;
        case 9: // 10% frame1, 90% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable1[(*yFrame1)>>5] + quantTable9[(*yFrame2)>>2]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable1[(*uFrame1)>>5] + quantTable9[(*uFrame2)>>2] - 10);
                *vFrame1 = (TUint8)(quantTable1[(*vFrame1)>>5] + quantTable9[(*vFrame2)>>2] - 10);
            }
            break;
        case 8: // 20% frame1, 80% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable2[(*yFrame1)>>4] + quantTable8[(*yFrame2)>>2]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable2[(*uFrame1)>>4] + quantTable8[(*uFrame2)>>2] - 15);
                *vFrame1 = (TUint8)(quantTable2[(*vFrame1)>>4] + quantTable8[(*vFrame2)>>2] - 15);
            }
            break;
        case 7: // 30% frame1, 70% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable3[(*yFrame1)>>4] + quantTable7[(*yFrame2)>>2]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable3[(*uFrame1)>>4] + quantTable7[(*uFrame2)>>2] - 15);
                *vFrame1 = (TUint8)(quantTable3[(*vFrame1)>>4] + quantTable7[(*vFrame2)>>2] - 15);
            }
            break;
        case 6: // 40% frame1, 60% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable4[(*yFrame1)>>3] + quantTable6[(*yFrame2)>>3]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable4[(*uFrame1)>>3] + quantTable6[(*uFrame2)>>3] - 10);
                *vFrame1 = (TUint8)(quantTable4[(*vFrame1)>>3] + quantTable6[(*vFrame2)>>3] - 10);
            }
            break;
        case 5: // 50% frame1, 50% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable5[(*yFrame1)>>3] + quantTable5[(*yFrame2)>>3]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable5[(*uFrame1)>>3] + quantTable5[(*uFrame2)>>3] - 5);
                *vFrame1 = (TUint8)(quantTable5[(*vFrame1)>>3] + quantTable5[(*vFrame2)>>3] - 5);
            }
            break;
        case 4: // 60% frame1, 40% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable6[(*yFrame1)>>3] + quantTable4[(*yFrame2)>>3]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable6[(*uFrame1)>>3] + quantTable4[(*uFrame2)>>3] - 10);
                *vFrame1 = (TUint8)(quantTable6[(*vFrame1)>>3] + quantTable4[(*vFrame2)>>3] - 10);
            }
            break;
        case 3: // 70% frame1, 30% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable7[(*yFrame1)>>2] + quantTable3[(*yFrame2)>>4]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable7[(*uFrame1)>>2] + quantTable3[(*uFrame2)>>4] - 8);
                *vFrame1 = (TUint8)(quantTable7[(*vFrame1)>>2] + quantTable3[(*vFrame2)>>4] - 8);
            }
            break;
        case 2: // 80% frame1, 20% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable8[(*yFrame1)>>2] + quantTable2[(*yFrame2)>>4]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable8[(*uFrame1)>>2] + quantTable2[(*uFrame2)>>4] - 8);
                *vFrame1 = (TUint8)(quantTable8[(*vFrame1)>>2] + quantTable2[(*vFrame2)>>4] - 8);
            }
            break;
        case 1: // 90% frame1, 10% frame2
            for(i=0; i<yLength; i++, yFrame1++,yFrame2++) // Y
                *yFrame1 = (TUint8)(quantTable9[(*yFrame1)>>2] + quantTable1[(*yFrame2)>>5]);
            for(i=0; i<uLength; i++, uFrame1++,uFrame2++,vFrame1++,vFrame2++) // U,V
            {
                *uFrame1 = (TUint8)(quantTable9[(*uFrame1)>>2] + quantTable1[(*uFrame2)>>5] - 5);
                *vFrame1 = (TUint8)(quantTable9[(*vFrame1)>>2] + quantTable1[(*vFrame2)>>5] - 5);
            }
            break;
        default: // e.g., 100% frame1, 0% frame2
            break;
    }
    return;
}
|
5088 |
|
5089 |
|
// ---------------------------------------------------------
// CVideoProcessor::ApplySlidingTransitionEffect
// Applies sliding transition effect between two YUV frames
// (other items were commented in a header).
// ---------------------------------------------------------
//
// Applies a wipe transition between two YUV 4:2:0 planar frames,
// in-place into aYUVPtr1: a growing slice of frame2 is copied over
// frame1 from the wipe's leading edge. The slice grows linearly over
// iStartNumberOfTransitionFrames*2 steps; aRepeatFrame (0/1) selects
// the half-step: step = aTransitionFramenumber*2 + aRepeatFrame.
//
// NOTE: for the vertical (top/bottom) wipes ySliceWidth/uSliceWidth
// actually hold the slice HEIGHT in rows; the variable names are reused
// from the horizontal cases.
//
// aYUVPtr1 : outgoing clip's frame; receives the result
// aYUVPtr2 : incoming clip's frame (read-only here)
void CVideoProcessor::ApplySlidingTransitionEffect(TUint8* aYUVPtr1,
                                                   TUint8* aYUVPtr2,
                                                   TVedMiddleTransitionEffect aVedMiddleTransitionEffect,
                                                   TInt aRepeatFrame,
                                                   TInt aTransitionFramenumber)
{
    TInt i;
    TSize tempSize = iProcessor->GetMovieResolution();
    TInt yLength = tempSize.iWidth*tempSize.iHeight;
    TInt uLength = yLength>>2;           // one chroma plane (4:2:0)
    TInt yWidth = tempSize.iWidth;
    TInt uWidth = tempSize.iWidth>>1;    // chroma is half width...
    TInt yHeight = tempSize.iHeight;
    TInt uHeight = tempSize.iHeight>>1;  // ...and half height
    TUint8* yFrame1 = (TUint8*)aYUVPtr1;
    TUint8* uFrame1 = (TUint8*)(yFrame1+yLength);
    TUint8* vFrame1 = (TUint8*)(uFrame1+uLength);
    TUint8* yFrame2 = (TUint8*)aYUVPtr2;
    TUint8* uFrame2 = (TUint8*)(yFrame2+yLength);
    TUint8* vFrame2 = (TUint8*)(uFrame2+uLength);
    TPtr8 ptr(0,0);
    TInt offset = 0;
    TInt ySliceWidth = 0;
    TInt uSliceWidth = 0;
    TInt sliceSize = 0;
    // effective step number within the doubled-resolution transition
    TInt frameNumber = (aTransitionFramenumber<<1) + aRepeatFrame;

    switch(aVedMiddleTransitionEffect)
    {
        case EVedMiddleTransitionEffectWipeLeftToRight:
            // figure out the amount of data to change
            VDASSERT(iStartNumberOfTransitionFrames,19);
            // linear interpolation of the slice width, rounded to nearest
            ySliceWidth = (TInt)((TReal)yWidth * (TReal)(frameNumber+1)/(TReal)(iStartNumberOfTransitionFrames<<1) + 0.5);
            if(ySliceWidth>yWidth) ySliceWidth = yWidth;
            uSliceWidth = ySliceWidth>>1;
            // copy the relevant portions of the image from frame2 to frame1
            // y: leftmost ySliceWidth pixels of every row
            for(i=0; i<yHeight; i++, yFrame1+=yWidth,yFrame2+=yWidth)
            {
                ptr.Set(yFrame1,ySliceWidth,ySliceWidth);
                ptr.Copy(yFrame2,ySliceWidth);
            }
            // u,v
            for(i=0; i<uHeight; i++, uFrame1+=uWidth,uFrame2+=uWidth,vFrame1+=uWidth,vFrame2+=uWidth)
            {
                ptr.Set(uFrame1,uSliceWidth,uSliceWidth);
                ptr.Copy(uFrame2,uSliceWidth);
                ptr.Set(vFrame1,uSliceWidth,uSliceWidth);
                ptr.Copy(vFrame2,uSliceWidth);
            }
            break;
        case EVedMiddleTransitionEffectWipeRightToLeft:
            // figure out the amount of data to change
            VDASSERT(iStartNumberOfTransitionFrames,20);
            ySliceWidth = (TInt)((TReal)yWidth * (TReal)(frameNumber+1)/(TReal)(iStartNumberOfTransitionFrames<<1) + 0.5);
            if(ySliceWidth>yWidth) ySliceWidth = yWidth;
            uSliceWidth = ySliceWidth>>1;
            // evaluate the yuv offsets and new positions to point to in the buffer:
            // shift all plane pointers so the copies cover the RIGHT edge of each row
            offset = yWidth-ySliceWidth;
            yFrame1+=offset;
            yFrame2+=offset;
            offset = uWidth-uSliceWidth;
            uFrame1+=offset;
            uFrame2+=offset;
            vFrame1+=offset;
            vFrame2+=offset;
            // copy the relevant portions of the image from frame2 to frame1
            // y
            for(i=0; i<yHeight; i++, yFrame1+=yWidth,yFrame2+=yWidth)
            {
                ptr.Set(yFrame1,ySliceWidth,ySliceWidth);
                ptr.Copy(yFrame2,ySliceWidth);
            }
            // u,v
            for(i=0; i<uHeight; i++, uFrame1+=uWidth,uFrame2+=uWidth,vFrame1+=uWidth,vFrame2+=uWidth)
            {
                ptr.Set(uFrame1,uSliceWidth,uSliceWidth);
                ptr.Copy(uFrame2,uSliceWidth);
                ptr.Set(vFrame1,uSliceWidth,uSliceWidth);
                ptr.Copy(vFrame2,uSliceWidth);
            }
            break;
        case EVedMiddleTransitionEffectWipeTopToBottom:
            // figure out the amount of data to change
            VDASSERT(iStartNumberOfTransitionFrames,21);
            // here ySliceWidth is a row count (slice height)
            ySliceWidth = (TInt)((TReal)yHeight * (TReal)(frameNumber+1)/(TReal)(iStartNumberOfTransitionFrames<<1) + 0.5);
            if(ySliceWidth>yHeight) ySliceWidth = yHeight;
            uSliceWidth = ySliceWidth>>1;
            // copy the relevant portions of the image from frame2 to frame1;
            // rows are contiguous, so each plane needs only one copy
            // y
            sliceSize = ySliceWidth * yWidth;
            ptr.Set(yFrame1,sliceSize,sliceSize);
            ptr.Copy(yFrame2,sliceSize);
            // u,v
            sliceSize = uSliceWidth * uWidth;
            ptr.Set(uFrame1,sliceSize,sliceSize);
            ptr.Copy(uFrame2,sliceSize);
            ptr.Set(vFrame1,sliceSize,sliceSize);
            ptr.Copy(vFrame2,sliceSize);
            break;
        case EVedMiddleTransitionEffectWipeBottomToTop:
            // figure out the amount of data to change
            VDASSERT(iStartNumberOfTransitionFrames,22);
            ySliceWidth = (TInt)((TReal)yHeight * (TReal)(frameNumber+1)/(TReal)(iStartNumberOfTransitionFrames<<1) + 0.5);
            if(ySliceWidth>yHeight) ySliceWidth = yHeight;
            uSliceWidth = ySliceWidth>>1;
            // evaluate the yuv offsets and new positions to point to in the buffer:
            // advance to the first row of the bottom slice in each plane
            offset = (yHeight-ySliceWidth) * yWidth;
            yFrame1+=offset;
            yFrame2+=offset;
            offset = (uHeight-uSliceWidth) * uWidth;
            uFrame1+=offset;
            uFrame2+=offset;
            vFrame1+=offset;
            vFrame2+=offset;
            // copy the relevant portions of the image from frame2 to frame1
            // y
            sliceSize = ySliceWidth * yWidth;
            ptr.Set(yFrame1,sliceSize,sliceSize);
            ptr.Copy(yFrame2,sliceSize);
            // u,v
            sliceSize = uSliceWidth * uWidth;
            ptr.Set(uFrame1,sliceSize,sliceSize);
            ptr.Copy(uFrame2,sliceSize);
            ptr.Set(vFrame1,sliceSize,sliceSize);
            ptr.Copy(vFrame2,sliceSize);
            break;
        // non-sliding transitions are handled elsewhere; nothing to do here
        case EVedMiddleTransitionEffectNone:
        case EVedMiddleTransitionEffectDipToBlack:
        case EVedMiddleTransitionEffectDipToWhite:
        case EVedMiddleTransitionEffectCrossfade:
        case EVedMiddleTransitionEffectLast:
        default:
            break;
    }
    return;
}
|
5233 |
|
5234 |
|
5235 // --------------------------------------------------------- |
|
5236 // CVideoProcessor::ApplySpecialEffect |
|
5237 // Applies color effect for a YUV frame |
|
5238 // (other items were commented in a header). |
|
5239 // --------------------------------------------------------- |
|
5240 // |
|
5241 void CVideoProcessor::ApplySpecialEffect(TInt aColorEffect, TUint8* aYUVDataPtr, |
|
5242 TInt aColorToneU, TInt aColorToneV) |
|
5243 { |
|
5244 VDASSERT(aYUVDataPtr,23); |
|
5245 VDASSERT(iVideoWidth,24); |
|
5246 VDASSERT(iVideoHeight,25); |
|
5247 TChar chr; |
|
5248 TInt value; |
|
5249 TInt offset; |
|
5250 TInt length; |
|
5251 // Values for the U & V Fill parameters |
|
5252 TInt uFillValue, vFillValue; |
|
5253 TPtr8 ptr(0,0); |
|
5254 TSize tempSize = iProcessor->GetMovieResolution(); |
|
5255 |
|
5256 // asad - check if mpeg4, then change pixel range from (-128,127) to (0,255) |
|
5257 if (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile) |
|
5258 { |
|
5259 // U |
|
5260 aColorToneU += 128; |
|
5261 if (aColorToneU<0) aColorToneU = 0; |
|
5262 if (aColorToneU>255) aColorToneU = 255; |
|
5263 // V |
|
5264 aColorToneV += 128; |
|
5265 if (aColorToneV<0) aColorToneV = 0; |
|
5266 if (aColorToneV>255) aColorToneV = 255; |
|
5267 } |
|
5268 TChar uChr, vChr; |
|
5269 switch(aColorEffect) |
|
5270 { |
|
5271 case 0/*None*/: |
|
5272 return; |
|
5273 case 1/*BW*/: |
|
5274 value = 128; |
|
5275 chr = value; |
|
5276 offset = tempSize.iWidth*tempSize.iHeight; |
|
5277 length = offset>>1; // u,v data length (2*L/2*W/2) |
|
5278 ptr.Set((TUint8*)(aYUVDataPtr+offset), length, length); |
|
5279 ptr.Fill((TChar)chr); |
|
5280 break; |
|
5281 case 2: |
|
5282 offset = tempSize.iWidth*tempSize.iHeight; |
|
5283 length = offset>>2; |
|
5284 uFillValue = aColorToneU; |
|
5285 uChr = uFillValue; |
|
5286 vFillValue = aColorToneV; |
|
5287 vChr = vFillValue; |
|
5288 |
|
5289 ptr.Set((TUint8*)(aYUVDataPtr + offset), length, length); |
|
5290 ptr.Fill((TChar)uChr); |
|
5291 |
|
5292 offset = 1.25 * offset; // For filling the v-value |
|
5293 ptr.Set((TUint8*)(aYUVDataPtr + offset), length, length); |
|
5294 ptr.Fill((TChar)vChr); |
|
5295 break; |
|
5296 default: |
|
5297 return; |
|
5298 } |
|
5299 } |
|
5300 |
|
5301 // --------------------------------------------------------- |
|
5302 // CVideoProcessor::TFrameOperation2TInt |
|
5303 // Converts frame operation enumeration to int |
|
5304 // (other items were commented in a header). |
|
5305 // --------------------------------------------------------- |
|
5306 // |
|
5307 TInt CVideoProcessor::TFrameOperation2TInt(TDecoderFrameOperation aFrameOperation) |
|
5308 { |
|
5309 switch(aFrameOperation) |
|
5310 { |
|
5311 case EDecodeAndWrite: |
|
5312 return 1; |
|
5313 case EDecodeNoWrite: |
|
5314 return 2; |
|
5315 case EWriteNoDecode: |
|
5316 return 3; |
|
5317 case ENoDecodeNoWrite: |
|
5318 return 4; |
|
5319 default: |
|
5320 return KErrGeneral; |
|
5321 } |
|
5322 } |
|
5323 |
|
5324 // --------------------------------------------------------- |
|
5325 // CVideoProcessor::TColorEffect2TInt |
|
5326 // Converts color effect enumeration to int |
|
5327 // (other items were commented in a header). |
|
5328 // --------------------------------------------------------- |
|
5329 // |
|
5330 TInt CVideoProcessor::TColorEffect2TInt(TVedColorEffect aColorEffect) |
|
5331 { |
|
5332 switch(aColorEffect) |
|
5333 { |
|
5334 case EVedColorEffectNone: |
|
5335 return 0; |
|
5336 case EVedColorEffectBlackAndWhite: |
|
5337 return 1; |
|
5338 case EVedColorEffectToning: |
|
5339 return 2; |
|
5340 default: |
|
5341 return KErrGeneral; |
|
5342 } |
|
5343 } |
|
5344 |
|
5345 |
|
5346 |
|
5347 // --------------------------------------------------------- |
|
5348 // CVideoProcessor::InputDataAvailable |
|
5349 // Overridden CDataProcessor::InputDataAvailable() method |
|
5350 // Called when new input blocks are available |
|
5351 // (other items were commented in a header). |
|
5352 // --------------------------------------------------------- |
|
5353 // |
|
5354 void CVideoProcessor::InputDataAvailable(TAny* /*aUserPointer*/) |
|
5355 { |
|
5356 PRINT((_L("CVideoProcessor::InputDataAvailable()"))); |
|
5357 // Signal ourselves if we are decoding and a request is not |
|
5358 // pending: |
|
5359 if ( iDecoding && !iTranscoderInitPending && !iDecodePending && |
|
5360 (iStatus == KRequestPending) ) |
|
5361 { |
|
5362 PRINT((_L("CVideoProcessor::InputDataAvailable() - complete request"))); |
|
5363 TRequestStatus *status = &iStatus; |
|
5364 User::RequestComplete(status, KErrNone); |
|
5365 } |
|
5366 } |
|
5367 |
|
5368 |
|
5369 // --------------------------------------------------------- |
|
5370 // CVideoProcessor::StreamEndReached |
|
5371 // Overridden CDataProcessor::StreamEndReached() method |
|
5372 // Called when input stream has ended |
|
5373 // (other items were commented in a header). |
|
5374 // --------------------------------------------------------- |
|
5375 // |
|
5376 |
|
5377 void CVideoProcessor::StreamEndReached(TAny* /*aUserPointer*/) |
|
5378 { |
|
5379 PRINT((_L("CVideoProcessor::StreamEndReached()"))); |
|
5380 iStreamEnd = ETrue; |
|
5381 } |
|
5382 |
|
5383 // --------------------------------------------------------- |
|
5384 // CVideoProcessor::DoCancel |
|
5385 // Cancels any asynchronous requests pending. |
|
5386 // |
|
5387 // (other items were commented in a header). |
|
5388 // --------------------------------------------------------- |
|
5389 // |
|
5390 void CVideoProcessor::DoCancel() |
|
5391 { |
|
5392 |
|
5393 // Cancel our internal request |
|
5394 if ( iStatus == KRequestPending ) |
|
5395 { |
|
5396 TRequestStatus *status = &iStatus; |
|
5397 User::RequestComplete(status, KErrCancel); |
|
5398 } |
|
5399 |
|
5400 } |
|
5401 |
|
5402 // --------------------------------------------------------- |
|
5403 // CVideoProcessor::GetNextClipTransitionInfo |
|
5404 // Get the start transition info of the next clip. |
|
5405 // (other items were commented in a header). |
|
5406 // --------------------------------------------------------- |
|
5407 // |
|
5408 void CVideoProcessor::GetNextClipTransitionInfo() |
|
5409 { |
|
5410 if ( iNextTransitionNumber == -1 ) |
|
5411 { |
|
5412 iNextTransitionNumber = iProcessor->NextClipStartTransitionNumber(); |
|
5413 } |
|
5414 } |
|
5415 |
|
5416 // --------------------------------------------------------- |
|
5417 // CVideoProcessor::DetermineResolutionChange |
|
5418 // This function checks if the video clip is needed to be resampled |
|
5419 // (other items were commented in a header). |
|
5420 // --------------------------------------------------------- |
|
5421 // Resolution Transcoder, check if this video clip need to be resample |
|
5422 |
|
5423 TBool CVideoProcessor::DetermineResolutionChange() |
|
5424 { |
|
5425 TSize VideoClipResolution = iProcessor->GetVideoClipResolution(); |
|
5426 TSize MovieResolution = iProcessor->GetMovieResolution(); |
|
5427 if (VideoClipResolution != MovieResolution) |
|
5428 return ETrue; |
|
5429 else |
|
5430 return EFalse; |
|
5431 } |
|
5432 |
|
5433 // --------------------------------------------------------- |
|
5434 // CVideoProcessor::DetermineFrameRateChange |
|
5435 // This function checks if the frame rate must be changed |
|
5436 // => clip re-encoded |
|
5437 // (other items were commented in a header). |
|
5438 // --------------------------------------------------------- |
|
5439 // |
|
5440 TBool CVideoProcessor::DetermineFrameRateChange() |
|
5441 { |
|
5442 |
|
5443 TReal clipFrameRate = iProcessor->GetVideoClipFrameRate(); |
|
5444 TReal movieFrameRate = iProcessor->GetMovieFrameRate(); |
|
5445 |
|
5446 // Do re-encoding only when reducing frame rate, |
|
5447 // otherwise we would have to come up with new frames |
|
5448 if ( movieFrameRate > 0 && clipFrameRate > movieFrameRate ) |
|
5449 return ETrue; |
|
5450 |
|
5451 return EFalse; |
|
5452 |
|
5453 } |
|
5454 |
|
5455 // --------------------------------------------------------- |
|
// CVideoProcessor::DetermineBitRateChange |
|
5457 // This function checks if the bitrate must be changed |
|
5458 // => clip re-encoded |
|
5459 // (other items were commented in a header). |
|
5460 // --------------------------------------------------------- |
|
5461 // |
|
5462 TBool CVideoProcessor::DetermineBitRateChange() |
|
5463 { |
|
5464 |
|
5465 // : AVC bitrate transcoding from a higher level |
|
5466 // to lower if resolution transcoding is not needed |
|
5467 |
|
5468 if ( iProcessor->GetMovieVideoBitrate() > 0 ) // movie has bitrate restriction => need to transcode |
|
5469 return ETrue; |
|
5470 |
|
5471 if ( iProcessor->GetOutputVideoType() == EVedVideoTypeH263Profile0Level10 ) |
|
5472 { |
|
5473 if (iDataFormat == EDataH263) |
|
5474 { |
|
5475 if ( (iProcessor->GetCurrentClipVideoType() != EVedVideoTypeH263Profile0Level10) && |
|
5476 (iProcessor->GetCurrentClipVideoType() != EVedVideoTypeUnrecognized) ) |
|
5477 return ETrue; |
|
5478 } |
|
5479 |
|
5480 else if (iDataFormat == EDataMPEG4) |
|
5481 { |
|
5482 // level 0 and level 1 max bitrate is 64 kb/s |
|
5483 // others need to be transcoded |
|
5484 if (iInputMPEG4ProfileLevelId != 8 && iInputMPEG4ProfileLevelId != 1) |
|
5485 return ETrue; |
|
5486 } |
|
5487 } |
|
5488 |
|
5489 return EFalse; |
|
5490 |
|
5491 } |
|
5492 |
|
5493 |
|
5494 // --------------------------------------------------------- |
|
5495 // CVideoProcessor::GetFrameDuration |
|
5496 // Calculates the duration of current frame |
|
5497 // (other items were commented in a header). |
|
5498 // --------------------------------------------------------- |
|
5499 // |
|
5500 TInt CVideoProcessor::GetFrameDuration(TInt aFrameNumber) |
|
5501 { |
|
5502 // calculate frame duration in ticks |
|
5503 TInt startFrame = iProcessor->GetOutputNumberOfFrames() - iNumberOfFrames; |
|
5504 TInt absFrameNumber = startFrame + aFrameNumber; |
|
5505 TInt cur = absFrameNumber; |
|
5506 TInt next = cur+1; |
|
5507 |
|
5508 // frameDuration is in ticks, with timescale of the current input clip |
|
5509 if(next >= iProcessor->GetOutputNumberOfFrames()) |
|
5510 { |
|
5511 return I64INT(iProcessor->GetVideoClipDuration() - iProcessor->VideoFrameTimeStamp(cur) ); |
|
5512 } |
|
5513 else |
|
5514 { |
|
5515 return I64INT( iProcessor->VideoFrameTimeStamp(next) - iProcessor->VideoFrameTimeStamp(cur) ); |
|
5516 } |
|
5517 } |
|
5518 |
|
5519 // --------------------------------------------------------- |
|
5520 // CVideoProcessor::CheckVosHeaderL |
|
5521 // Checks whether the resynch bit is set if set then resets to zero |
|
5522 // (other items were commented in a header). |
|
5523 // @return TBool |
|
5524 // --------------------------------------------------------- |
|
5525 // |
|
5526 |
|
5527 TBool CVideoProcessor::CheckVosHeaderL(TPtrC8& aBuf) |
|
5528 { |
|
5529 return iDecoder->CheckVOSHeaderL((TPtrC8&)aBuf); |
|
5530 } |
|
5531 |
|
5532 |
|
5533 // --------------------------------------------------------- |
|
5534 // CVideoProcessor::RenderFrame |
|
5535 // The H.263 decoder calls this function when a decoded |
|
5536 // frame is available for retrieving |
|
5537 // @return TInt |
|
5538 // --------------------------------------------------------- |
|
5539 // |
|
5540 TInt CVideoProcessor::RenderFrame(TAny* aFrame) |
|
5541 { |
|
5542 iDecoder->FrameRendered(aFrame); |
|
5543 return KErrNone; |
|
5544 } |
|
5545 |
|
5546 // --------------------------------------------------------- |
|
5547 // CVideoProcessor::MtoTimerElapsed |
|
5548 // Called when timer has elapsed |
|
5549 // --------------------------------------------------------- |
|
5550 // |
|
5551 void CVideoProcessor::MtoTimerElapsed(TInt aError) |
|
5552 { |
|
5553 |
|
5554 PRINT((_L("CVideoProcessor::MtoTimerElapsed() begin"))); |
|
5555 |
|
5556 if (aError != KErrNone) |
|
5557 { |
|
5558 iMonitor->Error(aError); |
|
5559 return; |
|
5560 } |
|
5561 |
|
5562 VDASSERT(iFrameInfoArray[0].iEncodeFrame == 1, 110); |
|
5563 |
|
5564 // if next frame in encode queue is an intermediate modification frame |
|
5565 // and modification has not yet been applied, start waiting timer again |
|
5566 if ( ( (iFrameInfoArray[0].iTransitionFrame == 1) || |
|
5567 (iProcessor->GetColorEffect() != EVedColorEffectNone) ) && |
|
5568 iFrameInfoArray[0].iModificationApplied == 0 ) |
|
5569 { |
|
5570 PRINT((_L("CVideoProcessor::MtoTimerElapsed() - modification not done yet, set timer"))); |
|
5571 |
|
5572 iTimer->SetTimer( TTimeIntervalMicroSeconds32( GetEncodingDelay() ) ); |
|
5573 return; |
|
5574 } |
|
5575 |
|
5576 PRINT((_L("CVideoProcessor::MtoTimerElapsed() - removing pic with ts %d ms"), |
|
5577 I64INT(iProcessor->GetVideoTimeInMsFromTicks(iFrameInfoArray[0].iTimeStamp, EFalse)) )) |
|
5578 |
|
5579 // save frame number to be able to recover in case the frame |
|
5580 // gets encoded regardless of the delay |
|
5581 iSkippedFrameNumber = iFrameInfoArray[0].iFrameNumber; |
|
5582 |
|
5583 // remove skipped frame from queue |
|
5584 iFrameInfoArray.Remove(0); |
|
5585 |
|
5586 PRINT((_L("CVideoProcessor::MtoTimerElapsed() - %d items in queue"), iFrameInfoArray.Count())); |
|
5587 |
|
5588 if (iDecodingSuspended && !iStreamEndRead) |
|
5589 { |
|
5590 if (iFrameInfoArray.Count() < iMaxItemsInProcessingQueue && !iDelayedWrite) |
|
5591 { |
|
5592 PRINT((_L("CVideoProcessor::MtoTimerElapsed() - Resume decoding"))); |
|
5593 iDecodingSuspended = EFalse; |
|
5594 // activate object to start decoding |
|
5595 TRequestStatus *status = &iStatus; |
|
5596 User::RequestComplete(status, KErrNone); |
|
5597 return; |
|
5598 } |
|
5599 } |
|
5600 |
|
5601 if ( !IsEncodeQueueEmpty() ) |
|
5602 { |
|
5603 // if there are still frames to be encoded, and next one is waiting, |
|
5604 // set timer again |
|
5605 if ( IsNextFrameBeingEncoded() ) |
|
5606 { |
|
5607 if ( !iTimer->IsPending() ) |
|
5608 { |
|
5609 PRINT((_L("CVideoProcessor::MtoTimerElapsed(), set timer again"))); |
|
5610 iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) ); |
|
5611 } |
|
5612 } |
|
5613 return; |
|
5614 } |
|
5615 |
|
5616 if (iDelayedWrite) |
|
5617 { |
|
5618 PRINT((_L("CVideoProcessor::MtoTimerElapsed(), writing delayed frame"))); |
|
5619 // write delayed frame |
|
5620 TRAPD( error, WriteDelayedFrameL() ); |
|
5621 if (error != KErrNone) |
|
5622 { |
|
5623 iMonitor->Error(aError); |
|
5624 return; |
|
5625 } |
|
5626 } |
|
5627 |
|
5628 if ( iStreamEndRead ) |
|
5629 { |
|
5630 if ( iFrameInfoArray.Count() != 0 ) |
|
5631 { |
|
5632 // return now if we have read stream end, but there are still frames waiting |
|
5633 // to be decoded => processing will be completed in MtroPictureFromTranscoder |
|
5634 PRINT((_L("CVideoProcessor::MtoTimerElapsed(), end read but frames in progress, return"))); |
|
5635 return; |
|
5636 } |
|
5637 |
|
5638 else |
|
5639 { |
|
5640 // activate to stop processing |
|
5641 PRINT((_L("CVideoProcessor::MtoTimerElapsed(), end read and all frames processed, stopping"))); |
|
5642 iProcessingComplete = ETrue; |
|
5643 } |
|
5644 } |
|
5645 |
|
5646 if (!IsActive()) |
|
5647 { |
|
5648 SetActive(); |
|
5649 iStatus = KRequestPending; |
|
5650 } |
|
5651 // activate object to continue/end processing |
|
5652 TRequestStatus *status = &iStatus; |
|
5653 User::RequestComplete(status, KErrNone); |
|
5654 PRINT((_L("CVideoProcessor::MtoTimerElapsed() end"))); |
|
5655 } |
|
5656 |
|
5657 // --------------------------------------------------------- |
|
5658 // CVideoProcessor::WriteBufferL |
|
5659 // Called by transcoder to pass an encoded buffer |
|
5660 // --------------------------------------------------------- |
|
5661 // |
|
// Receives one encoded buffer from the transcoder, matches it against the
// head of iFrameInfoArray by timestamp, optionally prepends the MPEG-4 VOL
// header or repackages AVC NAL units into length-prefixed form, and writes
// the frame to the output file. Also drives queue bookkeeping: resuming
// suspended decoding, re-arming the encode-wait timer, flushing a delayed
// frame, and signalling end of processing.
void CVideoProcessor::WriteBufferL(CCMRMediaBuffer* aBuffer)
    {
    // No frames are expected at all => drop the buffer.
    if ( iFrameInfoArray.Count() == 0 )
        {
        PRINT((_L("CVideoProcessor::WriteBufferL() not ready to receive buffer, return")));
        return;
        }

    // The head of the queue must be a frame marked for encoding in a
    // full-transcoding mode; otherwise this buffer is unexpected.
    if ( ((iFrameInfoArray[0].iTranscoderMode != EFullWithIM) &&
          (iFrameInfoArray[0].iTranscoderMode != EFull)) ||
          (iFrameInfoArray[0].iEncodeFrame != 1) )
        {
        PRINT((_L("CVideoProcessor::WriteBufferL() encoded picture received but not expected, ignore & return")));
        return;
        }

    // cancel timer
    iTimer->CancelTimer();

    if (aBuffer->Type() == CCMRMediaBuffer::EVideoMPEG4DecSpecInfo)
        {
        // should not happen
        return;
        }

    TTimeIntervalMicroSeconds encodedTs = aBuffer->TimeStamp();

    PRINT((_L("CVideoProcessor::WriteBufferL(), timestamp %d ms, keyFrame = %d"),
           I64INT( encodedTs.Int64() ) / 1000, aBuffer->RandomAccessPoint() ));

    TBool removeItem = ETrue;
    TInt64 timeStamp = 0;
    TInt frameNumber = 0;

    // Walk the queue head until a frame whose timestamp matches the
    // encoded buffer is found; earlier entries were skipped by the
    // encoder, a later entry means a previously skipped frame arrived.
    while (iFrameInfoArray.Count())
        {
        // Queue timestamps are in clip ticks; convert to microseconds
        // for comparison with the transcoder timestamp.
        TTimeIntervalMicroSeconds ts = (iProcessor->GetVideoTimeInMsFromTicks(iFrameInfoArray[0].iTimeStamp, EFalse)) * 1000;

        if (ts < encodedTs)
            {
            // a frame has been skipped
            iFrameInfoArray.Remove(0);
            PRINT((_L("CVideoProcessor::WriteBufferL() frame skipped - number of items in queue: %d"), iFrameInfoArray.Count()));
            }
        else if (ts > encodedTs)
            {
            // this frame has most likely been treated as skipped using timer,
            // but it was encoded regardless
            removeItem = EFalse;
            frameNumber = iSkippedFrameNumber;
            timeStamp = iProcessor->GetVideoTimeInTicksFromMs( encodedTs.Int64() / 1000, EFalse );

            PRINT((_L("CVideoProcessor::WriteBufferL() frame skipped falsely, ts in ticks = %d"), I64INT(timeStamp)));
            break;
            }
        else
            {
            // normal case, encoded frame timestamp is as expected
            timeStamp = iFrameInfoArray[0].iTimeStamp;
            frameNumber = iFrameInfoArray[0].iFrameNumber;
            break;
            }
        }

    VDASSERT(iFrameInfoArray.Count(), 50);

    // set descriptor for writing the frame
    TPtr8 writeDes(0,0);
    writeDes.Set(const_cast<TUint8*>(aBuffer->Data().Ptr()),
                 aBuffer->Data().Length(), aBuffer->Data().Length());

    HBufC8* tempBuffer = 0;
    TInt error;

    if ( (iProcessor->GetOutputVideoType() == EVedVideoTypeMPEG4SimpleProfile) &&
         (!iOutputVolHeaderWritten) && (frameNumber >= iFirstIncludedFrameNumber) )
        {
        VDASSERT(iOutputVolHeader, 51);

        // MPEG-4 output:
        // Encoded frame is the first one of the clip, insert VOL header at the beginning
        // Allocate a temp buffer to include vol header and the encoded frame
        TInt length = iOutputVolHeader->Length() + aBuffer->Data().Length();
        TRAP(error, tempBuffer = (HBufC8*) HBufC8::NewL(length) );

        if (error != KErrNone)
            {
            iMonitor->Error(error);
            return;
            }

        TPtr8 ptr( tempBuffer->Des() );
        ptr.Copy(iOutputVolHeader->Des());
        ptr.Append(aBuffer->Data());

        writeDes.Set(tempBuffer->Des());
        iOutputVolHeaderWritten = ETrue;
        }

#ifdef VIDEOEDITORENGINE_AVC_EDITING
    if (iProcessor->GetOutputVideoType() == EVedVideoTypeAVCBaselineProfile)
        {
        // The buffer layout appears to be: NAL payloads, then an
        // (offset, length) pair per NAL, then a trailing 4-byte NAL
        // count, all little-endian — NOTE(review): confirm against the
        // transcoder's output format.

        // get number of NAL units
        TUint8* tmp = const_cast<TUint8*>(aBuffer->Data().Ptr() + aBuffer->Data().Length());

        tmp -= 4;
        TInt numNalUnits = TInt(tmp[0]) + (TInt(tmp[1])<<8) + (TInt(tmp[2])<<16) + (TInt(tmp[3])<<24);

        TInt totalLength = 0;

        // get nal_unit_type of first NAL
        TUint8* type = const_cast<TUint8*>(aBuffer->Data().Ptr());
        TInt nalType = *type & 0x1F;

        PRINT((_L("CVideoProcessor::WriteBufferL() - # of NAL units = %d, frame # = %d, nal_unit_type = %d"),
               numNalUnits, frameNumber, nalType));

        // Types 1 and 5 are coded slices; anything else at the front is
        // parameter-set data to be skipped.
        if (nalType != 1 && nalType != 5)
            {
            // there are extra SPS/PPS units in the beginning
            // of the buffer, skip those
            numNalUnits--;

            if (numNalUnits == 0)
                {
                PRINT((_L("CVideoProcessor::WriteBufferL() No NAL units left, return")));
                return;
                }

            // point to first length field
            tmp = const_cast<TUint8*>(aBuffer->Data().Ptr() + aBuffer->Data().Length());
            tmp -= 4; // #
            tmp -= numNalUnits * 8; // offset & length for each NAL
            tmp += 4; // skip offset

            // get NAL length
            TInt len = TInt(tmp[0]) + (TInt(tmp[1])<<8) + (TInt(tmp[2])<<16) + (TInt(tmp[3])<<24);
            type += len;
            nalType = *type & 0x1F;

            // Keep skipping until a slice NAL is found or the units run out.
            while (nalType != 1 && nalType != 5 && numNalUnits)
                {
                numNalUnits--;
                tmp += 8;
                len = TInt(tmp[0]) + (TInt(tmp[1])<<8) + (TInt(tmp[2])<<16) + (TInt(tmp[3])<<24);
                type += len;
                nalType = *type & 0x1F;
                }
            tmp = const_cast<TUint8*>(aBuffer->Data().Ptr() + aBuffer->Data().Length()) - 4;
            }

        if (numNalUnits == 0)
            {
            PRINT((_L("CVideoProcessor::WriteBufferL() No NAL units left, return")));
            return;
            }

        // rewind to last length field
        tmp -= 4;

        // get total length of slices
        for (TInt x = numNalUnits; x > 0; x--)
            {
            totalLength += TInt(tmp[0]) + (TInt(tmp[1])<<8) + (TInt(tmp[2])<<16) + (TInt(tmp[3])<<24);
            tmp -= 8;
            }

        // Output: 4-byte big-endian length prefix per NAL unit.
        TInt tempLength = totalLength + numNalUnits*4;

        // allocate output buffer
        TRAP(error, tempBuffer = (HBufC8*) HBufC8::NewL(tempLength) );
        if (error != KErrNone)
            {
            iMonitor->Error(error);
            return;
            }

        TUint8* dst = const_cast<TUint8*>(tempBuffer->Des().Ptr());
        TUint8* src = const_cast<TUint8*>(aBuffer->Data().Ptr());

        // point to first offset field
        tmp += 4;

        for (TInt x = numNalUnits; x > 0; x--)
            {
            // get length
            tmp += 4;
            TInt length = TInt(tmp[0]) + (TInt(tmp[1])<<8) + (TInt(tmp[2])<<16) + (TInt(tmp[3])<<24);

            // set length (big-endian)
            dst[0] = TUint8((length >> 24) & 0xff);
            dst[1] = TUint8((length >> 16) & 0xff);
            dst[2] = TUint8((length >> 8) & 0xff);
            dst[3] = TUint8(length & 0xff);
            dst += 4;

            // copy data
            TPtr8 ptr(dst, length);
            ptr.Copy(src, length);

            dst += length;
            src += length;

            // point to next offset field
            tmp +=4;
            }

        writeDes.Set(tempBuffer->Des());
        writeDes.SetLength(tempLength);

        }
#endif

    // Figure out are we writing frames from the first
    // or second clip in color transition

    TBool colorTransitionFlag = ETrue;
    TInt index = KNumTransitionFrames / 4;

    if ( iFrameInfoArray[0].iTransitionFrame == 1 &&
         iFrameInfoArray[0].iTransitionPosition == EPositionStartOfClip &&
         iStartTransitionColor == EColorTransition )
        {
        if ( ( (iFrameInfoArray[0].iTransitionFrameNumber == index) &&
               (iFrameInfoArray[0].iRepeatFrame == 0) ) ||
               iFrameInfoArray[0].iTransitionFrameNumber < index )
            {
            colorTransitionFlag = EFalse;
            }
        }

    // write frame
    error = iProcessor->WriteVideoFrameToFile((TDesC8&)writeDes,
                                              timeStamp, 0 /*dummy*/,
                                              aBuffer->RandomAccessPoint(), EFalse, colorTransitionFlag, ETrue );

    // The temp buffer (VOL header / AVC repackaging) is no longer needed.
    if (tempBuffer)
        delete tempBuffer;

    // KErrCompletion from the writer means the movie end was reached.
    if (error == KErrCompletion)
        {
        PRINT((_L("CVideoProcessor::WriteBufferL() - processing complete")));
        // stop processing
        iProcessingComplete = ETrue;
        iFrameInfoArray.Reset();
        VDASSERT(iTranscoderStarted, 51);
        iTransCoder->StopL();
        iTranscoderStarted = EFalse;
        if (!IsActive())
            {
            SetActive();
            iStatus = KRequestPending;
            }

        // activate object to end processing
        TRequestStatus *status = &iStatus;
        User::RequestComplete(status, KErrNone);
        return;
        }

    else if (error != KErrNone)
        {
        iMonitor->Error(error);
        return;
        }

    TInt startFrame = iProcessor->GetOutputNumberOfFrames() - iNumberOfFrames;
    TInt absFrameNumber = startFrame + frameNumber;

    // save frame number
    iLastWrittenFrameNumber = frameNumber;

    iProcessor->SetFrameType(absFrameNumber, aBuffer->RandomAccessPoint());

    // Keep the queue entry if this buffer belonged to a falsely skipped
    // frame (its entry was already removed by the timer).
    if (removeItem)
        {
        iFrameInfoArray.Remove(0);

        PRINT((_L("CVideoProcessor::WriteBufferL() - removed encoded pic, %d items in queue"), iFrameInfoArray.Count()));
        }
    else
        PRINT((_L("CVideoProcessor::WriteBufferL() - did not remove encoded pic, %d items in queue"), iFrameInfoArray.Count()));

    // Resume decoding if it was suspended while the queue was full.
    if (iDecodingSuspended && !iStreamEndRead)
        {
        if (iFrameInfoArray.Count() < iMaxItemsInProcessingQueue && !iDelayedWrite)
            {
            PRINT((_L("CVideoProcessor::WriteBufferL() - Resume decoding")));
            iDecodingSuspended = EFalse;
            // activate object to start decoding
            TRequestStatus *status = &iStatus;
            User::RequestComplete(status, KErrNone);
            return;
            }
        }

    // if there are still frames to be encoded, start timer again
    if ( !IsEncodeQueueEmpty() )
        {
        // check if the next frame in queue is waiting to be encoded, set timer if so
        if ( IsNextFrameBeingEncoded() )
            {
            if ( !iTimer->IsPending() )
                {
                PRINT((_L("CVideoProcessor::WriteBufferL(), set timer")));
                iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
                }
            }
        return;
        }

    if (iStreamEndRead && !iDelayedWrite)
        {

        PRINT((_L("CVideoProcessor::WriteBufferL() - stream end read & !iDelayedWrite")));

        // stream end has been read
        if (iFrameInfoArray.Count() == 0)
            {
            PRINT((_L("CVideoProcessor::WriteBufferL() - stream end read, no frames left")));
            // end
            if (!IsActive())
                {
                SetActive();
                iStatus = KRequestPending;
                }
            iProcessingComplete = ETrue;
            // activate object to end processing
            TRequestStatus *status = &iStatus;
            User::RequestComplete(status, KErrNone);
            }
        // else there are frames to be decoded, processing will be completed
        // MtroPictureFromTranscoder
        return;
        }

    if (iDelayedWrite)
        {
        if ( IsEncodeQueueEmpty() )
            {
            PRINT((_L("CVideoProcessor::WriteBufferL() writing delayed frame")));

            TRAP(error, WriteDelayedFrameL());
            if (error != KErrNone)
                {
                iMonitor->Error(error);
                return;
                }

            if ( iStreamEndRead )
                {
                if ( iFrameInfoArray.Count() != 0 )
                    {
                    // return now if we have read stream end, but there are still frames waiting
                    // to be decoded => processing will be completed in MtroPictureFromTranscoder
                    PRINT((_L("CVideoProcessor::WriteBufferL(), end read but frames in progress, return")));
                    return;
                    }
                else
                    {
                    // activate to stop processing
                    PRINT((_L("CVideoProcessor::WriteBufferL(), end read and all frames processed, stopping")));
                    iProcessingComplete = ETrue;
                    }
                }

            if (!IsActive())
                {
                SetActive();
                iStatus = KRequestPending;
                }
            // activate object to continue/end processing
            TRequestStatus *status = &iStatus;
            User::RequestComplete(status, KErrNone);
            return;
            }
        else
            {
            // check if the next frame in queue is waiting to be encoded, set timer if so
            if ( IsNextFrameBeingEncoded() )
                {
                if ( !iTimer->IsPending() )
                    {
                    PRINT((_L("CVideoProcessor::WriteBufferL() - iDelayedWrite, set timer")));
                    iTimer->SetTimer( TTimeIntervalMicroSeconds32( iMaxEncodingDelay ) );
                    }
                return;
                }
            }
        }

    }
|
6057 |
|
6058 // --------------------------------------------------------- |
|
6059 // CVideoProcessor::IsEncodeQueueEmpty |
|
6060 // (other items were commented in a header) |
|
6061 // --------------------------------------------------------- |
|
6062 // |
|
6063 TBool CVideoProcessor::IsEncodeQueueEmpty() |
|
6064 { |
|
6065 |
|
6066 // check if there are still frames waiting to be encoded |
|
6067 for (TInt i = 0; i < iFrameInfoArray.Count(); i++) |
|
6068 { |
|
6069 if (iFrameInfoArray[i].iEncodeFrame == 1) |
|
6070 { |
|
6071 return EFalse; |
|
6072 } |
|
6073 } |
|
6074 return ETrue; |
|
6075 |
|
6076 } |
|
6077 |
|
6078 // --------------------------------------------------------- |
|
6079 // CVideoProcessor::IsNextFrameBeingEncoded |
|
6080 // (other items were commented in a header) |
|
6081 // --------------------------------------------------------- |
|
6082 // |
|
6083 TBool CVideoProcessor::IsNextFrameBeingEncoded() |
|
6084 { |
|
6085 |
|
6086 // check if the next frame in queue is waiting to be encoded |
|
6087 if ( iFrameInfoArray.Count() && (iFrameInfoArray[0].iEncodeFrame == 1) ) |
|
6088 { |
|
6089 |
|
6090 VDASSERT( ( iFrameInfoArray[0].iTranscoderMode == EFull || |
|
6091 iFrameInfoArray[0].iTranscoderMode == EFullWithIM ), 120 ); |
|
6092 |
|
6093 if ( (iFrameInfoArray[0].iTranscoderMode == EFull) || |
|
6094 (iFrameInfoArray[0].iModificationApplied == 1) ) |
|
6095 { |
|
6096 return ETrue; |
|
6097 } |
|
6098 } |
|
6099 |
|
6100 return EFalse; |
|
6101 |
|
6102 } |
|
6103 |
|
6104 // --------------------------------------------------------- |
|
6105 // CVideoProcessor::GetEncodingDelay |
|
6106 // (other items were commented in a header) |
|
6107 // --------------------------------------------------------- |
|
6108 // |
|
6109 TInt CVideoProcessor::GetEncodingDelay() |
|
6110 { |
|
6111 |
|
6112 // number of decode only -frames in queue before first encode frame |
|
6113 TInt numDecodeFrames = 0; |
|
6114 |
|
6115 TInt i; |
|
6116 for (i = 0; i < iFrameInfoArray.Count(); i++) |
|
6117 { |
|
6118 // get index of next encode frame in queue |
|
6119 if (iFrameInfoArray[i].iEncodeFrame == 1) |
|
6120 break; |
|
6121 else |
|
6122 numDecodeFrames++; |
|
6123 } |
|
6124 |
|
6125 VDASSERT(i < iFrameInfoArray.Count(), 112); |
|
6126 |
|
6127 TInt delay = iMaxEncodingDelay; |
|
6128 |
|
6129 // If the next frame in encoding queue is an intermediate modification frame |
|
6130 // (either transition frame or color effect has to be applied) |
|
6131 // and modification has not been applied to it, double the default delay |
|
6132 if ( ( (iFrameInfoArray[0].iTransitionFrame == 1) || |
|
6133 (iProcessor->GetColorEffect() != EVedColorEffectNone) ) && |
|
6134 iFrameInfoArray[0].iModificationApplied == 0 ) |
|
6135 { |
|
6136 PRINT((_L("CVideoProcessor::GetEncodingDelay() - double the delay"))); |
|
6137 delay <<= 1; |
|
6138 } |
|
6139 |
|
6140 // add time to process decode-only frames to delay |
|
6141 delay += numDecodeFrames * (iMaxEncodingDelay / 2); |
|
6142 |
|
6143 PRINT((_L("CVideoProcessor::GetEncodingDelay() - encoding delay = %d ms, num decode frames %d"), delay/1000, numDecodeFrames)); |
|
6144 |
|
6145 return delay; |
|
6146 |
|
6147 |
|
6148 |
|
6149 } |
|
6150 |
|
6151 // --------------------------------------------------------- |
|
6152 // CVideoProcessor::WriteDelayedFrameL |
|
6153 // (other items were commented in a header) |
|
6154 // --------------------------------------------------------- |
|
6155 // |
|
6156 void CVideoProcessor::WriteDelayedFrameL() |
|
6157 { |
|
6158 |
|
6159 PRINT((_L("CVideoProcessor::WriteDelayedFrameL() begin"))); |
|
6160 |
|
6161 // write the delayed frame |
|
6162 TPtr8 ptr(iDelayedBuffer->Des()); |
|
6163 |
|
6164 TInt error = iProcessor->WriteVideoFrameToFile(ptr, |
|
6165 iDelayedTimeStamp, 0 /*dummy*/, |
|
6166 iDelayedKeyframe, EFalse, EFalse, EFalse ); |
|
6167 |
|
6168 if (error == KErrCompletion) |
|
6169 { |
|
6170 PRINT((_L("CVideoProcessor::WriteDelayedFrameL() write delayed frame, processing complete"))); |
|
6171 VDASSERT(iTranscoderStarted, 51); |
|
6172 iTransCoder->StopL(); |
|
6173 iTranscoderStarted = EFalse; |
|
6174 iFrameInfoArray.Reset(); |
|
6175 iTimer->CancelTimer(); |
|
6176 iProcessingComplete = ETrue; |
|
6177 } |
|
6178 else if (error != KErrNone) |
|
6179 { |
|
6180 User::Leave(error); |
|
6181 } |
|
6182 |
|
6183 // save frame number |
|
6184 iLastWrittenFrameNumber = iDelayedFrameNumber; |
|
6185 |
|
6186 delete iDelayedBuffer; |
|
6187 iDelayedBuffer = 0; |
|
6188 iDelayedWrite = EFalse; |
|
6189 |
|
6190 PRINT((_L("CVideoProcessor::WriteDelayedFrameL() end"))); |
|
6191 |
|
6192 } |
|
6193 |
|
6194 TInt CVideoProcessor::SetVideoFrameSize(TSize /*aSize*/) |
|
6195 { |
|
6196 |
|
6197 PRINT((_L("CVideoProcessor::SetVideoFrameSize()"))) |
|
6198 |
|
6199 return KErrNone; |
|
6200 } |
|
6201 |
|
6202 TInt CVideoProcessor::SetAverageVideoBitRate(TInt /*aBitRate*/) |
|
6203 { |
|
6204 |
|
6205 PRINT((_L("CVideoProcessor::SetAverageVideoBitRate()"))) |
|
6206 |
|
6207 return KErrNone; |
|
6208 } |
|
6209 |
|
6210 |
|
6211 TInt CVideoProcessor::SetMaxVideoBitRate(TInt /*aBitRate*/) |
|
6212 { |
|
6213 PRINT((_L("CVideoProcessor::SetMaxVideoBitRate()"))) |
|
6214 |
|
6215 return KErrNone; |
|
6216 } |
|
6217 |
|
6218 TInt CVideoProcessor::SetAverageAudioBitRate(TInt /*aBitRate*/) |
|
6219 { |
|
6220 PRINT((_L("CVideoProcessor::SetAverageAudioBitRate()"))) |
|
6221 |
|
6222 return KErrNone; |
|
6223 } |
|
6224 |
|
6225 // --------------------------------------------------------- |
|
6226 // CVideoProcessor::SetVideoCodecL() |
|
6227 // Interpret and store video mime type |
|
6228 // --------------------------------------------------------- |
|
6229 // |
|
6230 void CVideoProcessor::SetOutputVideoCodecL(const TPtrC8& aMimeType) |
|
6231 { |
|
6232 TBuf8<256> string; |
|
6233 TBuf8<256> newMimeType; |
|
6234 string = KVedMimeTypeH263; |
|
6235 string += _L8( "*" ); |
|
6236 |
|
6237 iMaxOutputFrameRate = 15.0; |
|
6238 iArbitrarySizeAllowed = EFalse; |
|
6239 |
|
6240 if ( aMimeType.MatchF( (const TDesC8& )string ) != KErrNotFound ) |
|
6241 { |
|
6242 // H.263 |
|
6243 |
|
6244 newMimeType = KVedMimeTypeH263; |
|
6245 |
|
6246 if ( aMimeType.MatchF( _L8("*profile*") ) != KErrNotFound ) |
|
6247 { |
|
6248 // profile given, check if we support it |
|
6249 if ( aMimeType.MatchF( _L8("*profile=0*")) != KErrNotFound ) |
|
6250 { |
|
6251 // profile 0 requested |
|
6252 newMimeType += _L8( "; profile=0" ); |
|
6253 } |
|
6254 else |
|
6255 { |
|
6256 // no other profiles supported |
|
6257 PRINT((_L("CVideoEncoder::SetVideoCodecL() unsupported profile"))); |
|
6258 User::Leave(KErrNotSupported); |
|
6259 } |
|
6260 } |
|
6261 else |
|
6262 { |
|
6263 // no profile is given => assume 0 |
|
6264 newMimeType += _L8( "; profile=0" ); |
|
6265 } |
|
6266 |
|
6267 if ( aMimeType.MatchF( _L8("*level=10*") ) != KErrNotFound ) |
|
6268 { |
|
6269 iMaxOutputBitRate = iOutputBitRate = KVedBitRateH263Level10; |
|
6270 iMaxOutputResolution = KVedResolutionQCIF; |
|
6271 //dataBufferSize = KMaxCodedPictureSizeQCIF; |
|
6272 newMimeType += _L8( "; level=10" ); |
|
6273 } |
|
6274 else if ( aMimeType.MatchF( _L8("*level=45*") ) != KErrNotFound ) |
|
6275 { |
|
6276 iMaxOutputBitRate = iOutputBitRate = KVedBitRateH263Level45; |
|
6277 iMaxOutputResolution = KVedResolutionQCIF; |
|
6278 //dataBufferSize = KMaxCodedPictureSizeQCIF; |
|
6279 newMimeType += _L8( "; level=45" ); |
|
6280 } |
|
6281 else if ( aMimeType.MatchF( _L8("*level*") ) != KErrNotFound ) |
|
6282 { |
|
6283 // no other levels supported |
|
6284 PRINT((_L("CVideoEncoder::SetVideoCodecL() unsupported level"))); |
|
6285 User::Leave(KErrNotSupported); |
|
6286 } |
|
6287 else |
|
6288 { |
|
6289 // if no level is given assume 10 |
|
6290 iMaxOutputBitRate = iOutputBitRate = KVedBitRateH263Level10; |
|
6291 iMaxOutputResolution = KVedResolutionQCIF; |
|
6292 //dataBufferSize = KMaxCodedPictureSizeQCIF; |
|
6293 newMimeType += _L8( "; level=10" ); |
|
6294 } |
|
6295 } |
|
6296 else |
|
6297 { |
|
6298 string = KVedMimeTypeMPEG4Visual; |
|
6299 string += _L8( "*" ); |
|
6300 |
|
6301 if ( aMimeType.MatchF( string ) != KErrNotFound ) |
|
6302 { |
|
6303 // MPEG-4 Visual |
|
6304 newMimeType = KVedMimeTypeMPEG4Visual; |
|
6305 if ( aMimeType.MatchF( _L8("*profile-level-id=8*") ) != KErrNotFound ) |
|
6306 { |
|
6307 // simple profile level 0 |
|
6308 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level0; |
|
6309 iMaxOutputResolution = KVedResolutionQCIF; |
|
6310 // define max size 10K |
|
6311 //dataBufferSize = KMaxCodedPictureSizeMPEG4QCIF; |
|
6312 newMimeType += _L8("; profile-level-id=8"); |
|
6313 } |
|
6314 else if ( aMimeType.MatchF( _L8("*profile-level-id=9*") ) != KErrNotFound ) |
|
6315 { |
|
6316 // simple profile level 0b |
|
6317 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level0; |
|
6318 iMaxOutputResolution = KVedResolutionQCIF; |
|
6319 // define max size 10K |
|
6320 //dataBufferSize = KMaxCodedPictureSizeMPEG4QCIF; |
|
6321 newMimeType += _L8("; profile-level-id=9"); |
|
6322 } |
|
6323 else if ( aMimeType.MatchF( _L8("*profile-level-id=1*") ) != KErrNotFound ) |
|
6324 { |
|
6325 // simple profile level 1 |
|
6326 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level0; |
|
6327 iMaxOutputResolution = KVedResolutionQCIF; |
|
6328 // define max size 10K |
|
6329 //dataBufferSize = KMaxCodedPictureSizeMPEG4QCIF; |
|
6330 iArbitrarySizeAllowed = ETrue; |
|
6331 newMimeType += _L8("; profile-level-id=1"); |
|
6332 } |
|
6333 else if ( aMimeType.MatchF( _L8("*profile-level-id=2*") ) != KErrNotFound ) |
|
6334 { |
|
6335 // simple profile level 2 |
|
6336 //dataBufferSize = KMaxCodedPictureSizeMPEG4CIF; |
|
6337 iMaxOutputResolution = KVedResolutionCIF; |
|
6338 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level2; |
|
6339 iArbitrarySizeAllowed = ETrue; |
|
6340 newMimeType += _L8("; profile-level-id=2"); |
|
6341 } |
|
6342 else if ( aMimeType.MatchF( _L8("*profile-level-id=3*") ) != KErrNotFound ) |
|
6343 { |
|
6344 // simple profile level 3 |
|
6345 //dataBufferSize = KMaxCodedPictureSizeMPEG4CIF; |
|
6346 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level2; |
|
6347 iMaxOutputResolution = KVedResolutionCIF; |
|
6348 iMaxOutputFrameRate = 30.0; |
|
6349 iArbitrarySizeAllowed = ETrue; |
|
6350 newMimeType += _L8("; profile-level-id=3"); |
|
6351 } |
|
6352 else if ( aMimeType.MatchF( _L8("*profile-level-id=4*") ) != KErrNotFound ) |
|
6353 { |
|
6354 // simple profile level 4a |
|
6355 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level4A; |
|
6356 //dataBufferSize = KMaxCodedPictureSizeVGA; |
|
6357 iMaxOutputResolution = KVedResolutionVGA; |
|
6358 iMaxOutputFrameRate = 30.0; |
|
6359 iArbitrarySizeAllowed = ETrue; |
|
6360 newMimeType += _L8("; profile-level-id=4"); |
|
6361 } |
|
6362 else if ( aMimeType.MatchF( _L8("*profile-level-id=*") ) != KErrNotFound ) |
|
6363 { |
|
6364 // no other profile-level ids supported |
|
6365 PRINT((_L("CVideoEncoder::SetVideoCodecL() unsupported MPEG-4 profile-level"))); |
|
6366 User::Leave(KErrNotSupported); |
|
6367 } |
|
6368 else |
|
6369 { |
|
6370 // Default is level 0 in our case (normally probably level 1) |
|
6371 iMaxOutputBitRate = iOutputBitRate = KVedBitRateMPEG4Level0; |
|
6372 iMaxOutputResolution = KVedResolutionQCIF; |
|
6373 // define max size 10K |
|
6374 //dataBufferSize = KMaxCodedPictureSizeMPEG4QCIF; |
|
6375 newMimeType += _L8("; profile-level-id=8"); |
|
6376 } |
|
6377 } |
|
6378 |
|
6379 else |
|
6380 { |
|
6381 |
|
6382 #ifdef VIDEOEDITORENGINE_AVC_EDITING |
|
6383 string = KVedMimeTypeAVC; |
|
6384 string += _L8( "*" ); |
|
6385 |
|
6386 if ( aMimeType.MatchF( string ) != KErrNotFound ) |
|
6387 { |
|
6388 // AVC |
|
6389 newMimeType = KVedMimeTypeAVC; |
|
6390 if ( aMimeType.MatchF( _L8("*profile-level-id=42800A*") ) != KErrNotFound ) |
|
6391 { |
|
6392 // baseline profile level 1 |
|
6393 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel1; |
|
6394 iMaxOutputResolution = KVedResolutionQCIF; |
|
6395 newMimeType += _L8("; profile-level-id=42800A"); |
|
6396 } |
|
6397 else if ( aMimeType.MatchF( _L8("*profile-level-id=42900B*") ) != KErrNotFound ) |
|
6398 { |
|
6399 // baseline profile level 1b |
|
6400 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel1b; |
|
6401 iMaxOutputResolution = KVedResolutionQCIF; |
|
6402 newMimeType += _L8("; profile-level-id=42900B"); |
|
6403 } |
|
6404 else if ( aMimeType.MatchF( _L8("*profile-level-id=42800B*") ) != KErrNotFound ) |
|
6405 { |
|
6406 // baseline profile level 1.1 |
|
6407 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel1_1; |
|
6408 iMaxOutputResolution = KVedResolutionCIF; |
|
6409 newMimeType += _L8("; profile-level-id=42800B"); |
|
6410 } |
|
6411 else if ( aMimeType.MatchF( _L8("*profile-level-id=42800C*") ) != KErrNotFound ) |
|
6412 { |
|
6413 // baseline profile level 1.2 |
|
6414 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel1_2; |
|
6415 iMaxOutputResolution = KVedResolutionCIF; |
|
6416 newMimeType += _L8("; profile-level-id=42800C"); |
|
6417 } |
|
6418 //WVGA task |
|
6419 else if ( aMimeType.MatchF( _L8("*profile-level-id=42801E*") ) != KErrNotFound ) |
|
6420 { |
|
6421 // baseline profile level 3.0 |
|
6422 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel3; |
|
6423 iMaxOutputResolution = KVedResolutionWVGA; |
|
6424 newMimeType += _L8("; profile-level-id=42801E"); |
|
6425 } |
|
6426 |
|
6427 else if ( aMimeType.MatchF( _L8("*profile-level-id=42801F*") ) != KErrNotFound ) |
|
6428 { |
|
6429 // baseline profile level 3.1 |
|
6430 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel3_1; |
|
6431 iMaxOutputResolution = KVedResolutionWVGA; |
|
6432 newMimeType += _L8("; profile-level-id=42801F"); |
|
6433 } |
|
6434 else if ( aMimeType.MatchF( _L8("*profile-level-id=*") ) != KErrNotFound ) |
|
6435 { |
|
6436 // no other profile-level ids supported |
|
6437 PRINT((_L("CVideoEncoder::SetVideoCodecL() unsupported AVC profile-level"))); |
|
6438 User::Leave(KErrNotSupported); |
|
6439 } |
|
6440 else |
|
6441 { |
|
6442 // Default is level 1 (?) |
|
6443 iMaxOutputBitRate = iOutputBitRate = KVedBitRateAVCLevel1; |
|
6444 iMaxOutputResolution = KVedResolutionQCIF; |
|
6445 newMimeType += _L8("; profile-level-id=42800A"); |
|
6446 } |
|
6447 } |
|
6448 |
|
6449 else |
|
6450 { |
|
6451 // unknown mimetype |
|
6452 User::Leave( KErrNotSupported ); |
|
6453 } |
|
6454 #else |
|
6455 |
|
6456 // unknown mimetype |
|
6457 User::Leave( KErrNotSupported ); |
|
6458 |
|
6459 #endif |
|
6460 } |
|
6461 } |
|
6462 |
|
6463 // successfully interpreted the input mime type |
|
6464 iOutputMimeType = newMimeType; |
|
6465 |
|
6466 /*if ( iDataBuffer ) |
|
6467 { |
|
6468 delete iDataBuffer; |
|
6469 iDataBuffer = NULL; |
|
6470 } |
|
6471 iDataBuffer = (HBufC8*) HBufC8::NewL(dataBufferSize); */ |
|
6472 |
|
6473 } |
|
6474 |
|
// ---------------------------------------------------------
// CVideoProcessor::GetVosHeaderSize()
// Gets the size of MPEG-4 VOS header (from encoder).
// Returns the length in bytes of the stored output VOL/VOS
// header buffer. Asserts (debug builds) that the header
// buffer exists; callers must not invoke this before the
// header has been obtained.
// ---------------------------------------------------------
//
TInt CVideoProcessor::GetVosHeaderSize()
    {
    VDASSERT(iOutputVolHeader, 190);

    return iOutputVolHeader->Length();
    }
|
6486 |
|
6487 // --------------------------------------------------------- |
|
6488 // CCallbackTimer::NewL() |
|
6489 // Two-phased constructor |
|
6490 // --------------------------------------------------------- |
|
6491 // |
|
6492 CCallbackTimer* CCallbackTimer::NewL(MTimerObserver& aObserver) |
|
6493 { |
|
6494 |
|
6495 CCallbackTimer* self = new (ELeave) CCallbackTimer(aObserver); |
|
6496 CleanupStack::PushL(self); |
|
6497 self->ConstructL(); |
|
6498 CleanupStack::Pop(); |
|
6499 |
|
6500 return self; |
|
6501 } |
|
6502 |
|
// ---------------------------------------------------------
// CCallbackTimer::CCallbackTimer()
// C++ default constructor. Stores the observer reference and
// initializes the active object at standard priority; the
// RTimer itself is created in ConstructL().
// ---------------------------------------------------------
//
CCallbackTimer::CCallbackTimer(MTimerObserver& aObserver) :
    CActive(EPriorityStandard), iObserver(aObserver)

    {
    }
|
6513 |
|
// ---------------------------------------------------------
// CCallbackTimer::~CCallbackTimer()
// Destructor. Cancels any outstanding request first (required
// for an active object), then releases the timer handle if it
// was successfully created in ConstructL().
// ---------------------------------------------------------
//
CCallbackTimer::~CCallbackTimer()
    {
    Cancel();

    if ( iTimerCreated )
        {
        iTimer.Close();
        iTimerCreated = EFalse;
        }
    }
|
6529 |
|
// ---------------------------------------------------------
// CCallbackTimer::ConstructL()
// Symbian 2nd phase constructor. Creates the thread-local
// RTimer (leaves on failure) and registers this active object
// with the current thread's active scheduler.
// ---------------------------------------------------------
//
void CCallbackTimer::ConstructL()
    {
    // Create a timer; iTimerCreated gates Close() in the destructor
    User::LeaveIfError(iTimer.CreateLocal());
    iTimerCreated = ETrue;

    // Add us to active scheduler
    CActiveScheduler::Add(this);
    }
|
6544 |
|
6545 // --------------------------------------------------------- |
|
6546 // CCallbackTimer::SetTimer() |
|
6547 // Set timer |
|
6548 // --------------------------------------------------------- |
|
6549 // |
|
6550 void CCallbackTimer::SetTimer(TTimeIntervalMicroSeconds32 aDuration) |
|
6551 { |
|
6552 |
|
6553 // __ASSERT_DEBUG(!iTimerRequestPending != 0, -5000); //CSI: #174-D: expression has no effect, just an assert debug no effect intended |
|
6554 // __ASSERT_DEBUG(iTimerCreated, -5001); |
|
6555 |
|
6556 PRINT((_L("CCallbackTimer::SetTimer()"))) |
|
6557 |
|
6558 // activate timer to wait for encoding |
|
6559 SetActive(); |
|
6560 iStatus = KRequestPending; |
|
6561 iTimer.After(iStatus, aDuration); |
|
6562 iTimerRequestPending = ETrue; |
|
6563 |
|
6564 } |
|
6565 |
|
// ---------------------------------------------------------
// CCallbackTimer::CancelTimer()
// Cancel the timer. Delegates to CActive::Cancel(), which in
// turn invokes DoCancel() only if a request is outstanding.
// Safe to call when no timer is running.
// ---------------------------------------------------------
//
void CCallbackTimer::CancelTimer()
    {
    PRINT((_L("CCallbackTimer::CancelTimer()")))
    Cancel();
    }
|
6576 |
|
6577 // --------------------------------------------------------- |
|
6578 // CCallbackTimer::RunL() |
|
6579 // AO running method |
|
6580 // --------------------------------------------------------- |
|
6581 // |
|
6582 void CCallbackTimer::RunL() |
|
6583 { |
|
6584 if ( iTimerRequestPending ) |
|
6585 { |
|
6586 iTimerRequestPending = EFalse; |
|
6587 |
|
6588 // call observer |
|
6589 iObserver.MtoTimerElapsed(KErrNone); |
|
6590 } |
|
6591 } |
|
6592 |
|
6593 // --------------------------------------------------------- |
|
6594 // CCallbackTimer::DoCancel() |
|
6595 // AO cancelling method |
|
6596 // --------------------------------------------------------- |
|
6597 // |
|
6598 void CCallbackTimer::DoCancel() |
|
6599 { |
|
6600 |
|
6601 // Cancel our timer request if we have one |
|
6602 if ( iTimerRequestPending ) |
|
6603 { |
|
6604 iTimer.Cancel(); |
|
6605 iTimerRequestPending = EFalse; |
|
6606 return; |
|
6607 } |
|
6608 |
|
6609 } |
|
6610 |
|
// ---------------------------------------------------------
// CCallbackTimer::RunError()
// AO RunL error method: called by the active scheduler if
// RunL() leaves. Cancels any outstanding request and forwards
// the error code to the observer; returns KErrNone to mark
// the error as handled.
// ---------------------------------------------------------
//
TInt CCallbackTimer::RunError(TInt aError)
    {
    Cancel();

    // call observer
    iObserver.MtoTimerElapsed(aError);

    return KErrNone;
    }
|
6625 |
|
6626 |
|
6627 // End of File |
|
6628 |