/*
 * Copyright (c) 2006 Nokia Corporation and/or its subsidiary(-ies).
 * All rights reserved.
 * This component and the accompanying materials are made available
 * under the terms of "Eclipse Public License v1.0"
 * which accompanies this distribution, and is available
 * at the URL "http://www.eclipse.org/legal/epl-v10.html".
 *
 * Initial Contributors:
 * Nokia Corporation - initial contribution.
 *
 * Contributors:
 *
 * Description: Implementation for TNE processor
 *
 */



// Include Files
#include "TNEProcessorImpl.h"
#include "yuv2rgb12.h"
#include "yuv2rgb16.h"
#include "yuv2rgb24.h"
#include "DisplayChain.h"
#include "TNEVideosettings.h"
#include "ctrsettings.h"
#include "mp4parser.h"
#include "TNEDecoderWrap.h"

// Local Constants
const TUint KReadBufInitSize = 512;          // stream start buffer initial size
const TUint KInitialDataBufferSize = 8192;   // initial frame data buffer size
const TUint KH263StartCodeLength = 3;        // H.263 picture start code length
const TUint KMPEG4StartCodeLength = 4;       // MPEG-4 picture start code length

// An assertion macro wrapper to clean up the code a bit
#define VPASSERT(x) __ASSERT_DEBUG(x, User::Panic(_L("CTNEProcessorImpl"), EInvalidInternalState))

// An assertion macro wrapper to clean up the code a bit
#define VDASSERT(x, n) __ASSERT_DEBUG(x, User::Panic(_L("CTNEProcessorImpl"), EInternalAssertionFailure+n))

#ifdef _DEBUG
#include <e32svr.h>
#define PRINT(x) RDebug::Print x;
#else
#define PRINT(x)
#endif

// ================= MEMBER FUNCTIONS =======================

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::NewL
// Two-phased constructor.
// -----------------------------------------------------------------------------
//
CTNEProcessorImpl* CTNEProcessorImpl::NewL()
{
    CTNEProcessorImpl* self = NewLC();
    CleanupStack::Pop(self);
    return self;
}

CTNEProcessorImpl* CTNEProcessorImpl::NewLC()
{
    CTNEProcessorImpl* self = new (ELeave) CTNEProcessorImpl();
    CleanupStack::PushL(self);
    self->ConstructL();
    return self;
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::CTNEProcessorImpl
// C++ default constructor can NOT contain any code that might leave.
// -----------------------------------------------------------------------------
//
CTNEProcessorImpl::CTNEProcessorImpl()
    : CActive(EPriorityNormal), iReadDes(0, 0)
{
    // Reset state
    iState = EStateIdle;
    iFileFormat = EDataAutoDetect;
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::~CTNEProcessorImpl
// Destructor.
// -----------------------------------------------------------------------------
//
CTNEProcessorImpl::~CTNEProcessorImpl()
{
    Cancel();

    TInt error = KErrNone;

    TRAP(error, DoCloseVideoL());

    if (iParser)
    {
        delete iParser;
        iParser = 0;
    }

    // Deallocate buffers
    // This one is allocated in StartThumbL()
    if (iFrameBuffer)
    {
        User::Free(iFrameBuffer);
        iFrameBuffer = 0;
    }

    // Deallocate buffers
    if (iDataBuffer)
    {
        User::Free(iDataBuffer);
        iDataBuffer = 0;
    }

    if (iRgbBuf)
    {
        delete iRgbBuf;
        iRgbBuf = 0;
    }

    if (iOutBitmap)
    {
        delete iOutBitmap;
        iOutBitmap = 0;
    }

    if (iReadBuf)
    {
        User::Free(iReadBuf);
        iReadBuf = 0;
    }

    if (iMediaBuffer)
    {
        delete iMediaBuffer;
        iMediaBuffer = 0;
    }
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::ConstructL
// Symbian 2nd phase constructor can leave.
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::ConstructL()
{
    // Add to active scheduler
    CActiveScheduler::Add(this);

    // Allocate frame data buffer
    iDataBuffer = (TUint8*) User::AllocL(KInitialDataBufferSize);
    iBufferLength = KInitialDataBufferSize;

    // Allocate stream reading buffer
    iReadBuf = (TUint8*) User::AllocL(KReadBufInitSize);
    iBufLength = KReadBufInitSize;

    iMediaBuffer = new (ELeave) TVideoBuffer;

    iDecodePending = EFalse;
    iDecoding = EFalse;

    // Flag to indicate if the frame has been decoded
    iThumbFrameDecoded = EFalse;

    iState = EStateIdle;
}


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::Reset
// Resets the processor for processing a new movie
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::Reset()
{
    iFileFormat = EDataAutoDetect;
    iVideoType = EVideoH263Profile0Level10;
    iFirstFrameOfClip = EFalse;
    iFirstFrameFlagSet = EFalse;

    iProcessingCancelled = EFalse;

    iWaitSchedulerStarted = EFalse;

    iVideoFrameNumber = 0;
    iFrameBuffered = EFalse;
    iVideoIntraFrameNumber = 0;
    iStartThumbIndex = 0;
    iVideoClipDuration = 0;
    iDecoderInitPending = EFalse;

    // @@ YHK HARI AVC frame number flag
    // need to have a more inclusive approach for
    // all the different encoded streams
    iAVCDecodedFrameNumber = 0;

    // We are now properly initialized
    iState = EStateIdle;
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::GetClipPropertiesL
// Retrieves parameters for a clip
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::GetClipPropertiesL(RFile& aFileHandle,
                                           TTNEVideoFormat& aFormat,
                                           TTNEVideoType& aVideoType,
                                           TSize& aResolution,
                                           TInt& aVideoFrameCount)
{
    PRINT((_L("CTNEProcessorImpl::GetClipPropertiesL() begin")))

    // parse clip header
    CParser::TStreamParameters iStreamParams;

    // parse header
    TRAPD(error, ParseHeaderOnlyL(iStreamParams, aFileHandle));

    if (error != KErrNone && error != KErrNotSupported)
        User::Leave(error);

    /* pass back clip properties */

    // video format (file format actually)
    if (iStreamParams.iFileFormat == CParser::EFileFormat3GP)
        aFormat = ETNEVideoFormat3GPP;
    else if (iStreamParams.iFileFormat == CParser::EFileFormatMP4)
        aFormat = ETNEVideoFormatMP4;
    else
        aFormat = ETNEVideoFormatUnrecognized;

    // video type
    if (iStreamParams.iVideoFormat == CParser::EVideoFormatNone)
        aVideoType = ETNEVideoTypeNoVideo;
    else if (iStreamParams.iVideoFormat == CParser::EVideoFormatH263Profile0Level10)
        aVideoType = ETNEVideoTypeH263Profile0Level10;
    else if (iStreamParams.iVideoFormat == CParser::EVideoFormatH263Profile0Level45)
        aVideoType = ETNEVideoTypeH263Profile0Level45;
    else if (iStreamParams.iVideoFormat == CParser::EVideoFormatMPEG4)
        aVideoType = ETNEVideoTypeMPEG4SimpleProfile;
    else if (iStreamParams.iVideoFormat == CParser::EVideoFormatAVCProfileBaseline)
        aVideoType = ETNEVideoTypeAVCProfileBaseline;
    else
        aVideoType = ETNEVideoTypeUnrecognized;

    // resolution
    aResolution.iWidth = iStreamParams.iVideoWidth;
    aResolution.iHeight = iStreamParams.iVideoHeight;

    // get total number of video frames
    aVideoFrameCount = iParser->GetNumberOfVideoFrames();

    PRINT((_L("CTNEProcessorImpl::GetClipPropertiesL() end")))
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::StartThumbL
// Initiates thumbnail extraction from clip (full resolution raw is returned)
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::StartThumbL(RFile& aFileHandle,
                                    TInt aIndex,
                                    TSize aResolution,
                                    TDisplayMode aDisplayMode,
                                    TBool aEnhance)
{
    PRINT((_L("CTNEProcessorImpl::StartThumbL() begin, aIndex = %d, enhance = %d"), aIndex, aEnhance))

    // Get thumbnail parameters
    iOutputThumbResolution.SetSize(aResolution.iWidth, aResolution.iHeight);
    iThumbIndex = aIndex;
    iThumbDisplayMode = aDisplayMode;
    iThumbEnhance = aEnhance;

    // opens the file & parses header
    InitializeClipL(aFileHandle);

    // Allocate memory for iFrameBuffer
    TInt length = iVideoParameters.iWidth * iVideoParameters.iHeight;
    length += (length>>1);
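    // YUV 4:2:0 needs 1.5 bytes per pixel: one full-size luminance plane plus
    // two quarter-size chrominance planes. E.g. QCIF (176x144) gives
    // 25344 + 2*6336 = 38016 bytes.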
    iFrameBuffer = (TUint8*)User::AllocL(length);

    PRINT((_L("CTNEProcessorImpl::StartThumbL() end")))
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::ProcessThumbL
// Generates thumbnail from clip (actually, full resolution raw is returned)
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::ProcessThumbL(TRequestStatus &aStatus)
{
    PRINT((_L("CTNEProcessorImpl::ProcessThumbL() begin")))

    iState = EStateProcessing;
    iThumbnailRequestStatus = &aStatus;

    // seek to the last intra frame before the desired frame
    TTimeIntervalMicroSeconds startTime(0);
    if ( iThumbIndex > 0 )
    {
        TInt time = 0;
        TUint inMs = TUint( iParser->GetVideoFrameStartTime(iThumbIndex, &time) );
        TInt64 inMicroS = TInt64( inMs ) * TInt64( 1000 );
        startTime = TTimeIntervalMicroSeconds( inMicroS );
    }


    TInt error = iParser->SeekOptimalIntraFrame(startTime, iThumbIndex);
    if (error != KErrNone)
    {
        iThumbnailRequestStatus = 0;
        User::Leave(KErrGeneral);
    }

    iStartThumbIndex = iParser->GetStartFrameIndex();

    // @@ YHK: Try to handle this more inclusively
    iAVCDecodedFrameNumber = iStartThumbIndex;

    VPASSERT(iStartThumbIndex >= 0);

    // determine input stream type
    TRAP(error, GetFrameL());

    iDecoding = ETrue;

    // Create and initialize the decoder
    TRAP(error, CreateAndInitializeDecoderL());
    if (error != KErrNone)
    {
        // @@ YHK Do we want to use this flag ??
        iThumbnailRequestStatus = 0;
        User::Leave(KErrGeneral);
    }
}

// ---------------------------------------------------------
// CTNEProcessorImpl::DecodeThumb
// Decode a thumbnail frame internally
// (other items were commented in a header).
// ---------------------------------------------------------
//
void CTNEProcessorImpl::DecodeThumb(TBool aFirstFrame)
{
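    // Rough flow: when aFirstFrame is ETrue the frame fetched by GetFrameL() is
    // already in iDataBuffer, so it is only wrapped into iMediaBuffer and fed to
    // the decoder. Later calls (driven from RunL/MSendEncodedBuffer) read and feed
    // the following frames until the requested index has been reached or an
    // acceptable-quality frame has been decoded.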

    if (aFirstFrame)
    {
        // frame read in iDataBuffer, decode
        TVideoBuffer::TBufferType bt =
            (iDataFormat == EDataH263) ? TVideoBuffer::EVideoH263 : TVideoBuffer::EVideoMPEG4;

        TInt startTimeInTicks = 0;
        TInt startTimeInMs = 0;
        startTimeInMs = iParser->GetVideoFrameStartTime(iStartThumbIndex, &startTimeInTicks);

        TTimeIntervalMicroSeconds ts =
            TTimeIntervalMicroSeconds(startTimeInMs * TInt64(1000) );

        iMediaBuffer->Set( TPtrC8(iDataBuffer, iBufferLength),
                           bt,
                           iCurrentFrameLength,
                           ETrue, // keyFrame
                           ts
                           );

        iPreviousTimeStamp = ts;

        iDecodePending = ETrue;

        if (!IsActive())
        {
            SetActive();
            iStatus = KRequestPending;
        }

        TRAPD( err, iDecoder->WriteCodedBufferL(iMediaBuffer) );
        if (err != KErrNone)
        {
            // ready
            MNotifyThumbnailReady(err);
            return;
        }

        TUint freeInputBuffers = iDecoder->GetNumInputFreeBuffers();
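        // For AVC the request is completed right away whenever the decoder still has
        // a free input buffer, so RunL() can feed the next access unit without waiting
        // for a decoder callback (see the "AVC hack" note below).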

        // @@ YHK: AVC hack is there a better way of doing this ??
        if (iDataFormat == EDataAVC)
        {
            if (freeInputBuffers != 0)
            {
                // activate object to end processing
                TRequestStatus *status = &iStatus;
                User::RequestComplete(status, KErrNone);
            }
        }
        return;
    }

    if ((iThumbIndex == 0) && iThumbFrameDecoded)
    {
        MNotifyThumbnailReady(KErrNone);
        return;
    }

    iStartThumbIndex++;

    if ((iThumbIndex < 0) && iThumbFrameDecoded)
    {
        if (iFramesToSkip == 0)
        {
            PRINT((_L("CTNEProcessorImpl::DecodeThumb() frameskip done %d times"), iNumThumbFrameSkips));

            // limit the number of frame skip cycles to 3, because with
            // near-black or near-white videos we may never find a good thumb.
            // => max. 30 frames are decoded to get the thumb

            // check quality & frame skip cycles
            if ( CheckFrameQuality(iFrameBuffer) || iNumThumbFrameSkips >= 3 )
            {
                // quality ok or searched long enough, return
                MNotifyThumbnailReady(KErrNone);
                return;
            }
            iFramesToSkip = 10;
            iNumThumbFrameSkips++;
        }
        else
            iFramesToSkip--;

        // read new frame & decode
    }

    if (iThumbFrameDecoded)
    {
        iAVCDecodedFrameNumber++;
        iThumbFrameDecoded = EFalse;
    }


    if (iThumbIndex > 0)
    {
        // HARI AVC IMP THUMB
        TInt decodedFrameNumber = (iDataFormat == EDataAVC) ? iAVCDecodedFrameNumber : iStartThumbIndex;

        if (decodedFrameNumber > iThumbIndex)
        {
            // ready
            MNotifyThumbnailReady(KErrNone);
            return;
        }
        // read new frame & decode
    }

    TInt error;

    if (iStartThumbIndex < iParser->GetNumberOfVideoFrames()) // do not read past the last frame (already read!)
    {
        error = ReadVideoFrame();
        if (error != KErrNone)
        {
            MNotifyThumbnailReady(error);
            return;
        }
    }
    else
    {
        // no frames left, return
        MNotifyThumbnailReady(KErrNone);
        return;
    }

    iCurrentFrameLength = 0;
    iDataFormat = EDataUnknown;

    if (ReadAndUpdateFrame())
    {
        // frame read in iDataBuffer, decode
        TVideoBuffer::TBufferType bt =
            (iDataFormat == EDataH263) ? TVideoBuffer::EVideoH263 : TVideoBuffer::EVideoMPEG4;

        TInt startTimeInTicks = 0;
        TInt startTimeInMs = 0;
        startTimeInMs = iParser->GetVideoFrameStartTime(iStartThumbIndex, &startTimeInTicks);

        TTimeIntervalMicroSeconds ts =
            TTimeIntervalMicroSeconds(startTimeInMs * TInt64(1000) );

        if (ts <= iPreviousTimeStamp)
        {
            // adjust the timestamp so that it is bigger than that of the previous frame
            TReal frameRate = GetVideoClipFrameRate();

            TInt64 durationMs = TInt64( ( 1000.0 / frameRate ) + 0.5 );
            durationMs /= 2; // add half the duration of one frame

            ts = TTimeIntervalMicroSeconds( iPreviousTimeStamp.Int64() + durationMs*1000 );
        }

        iPreviousTimeStamp = ts;

        iMediaBuffer->Set( TPtrC8(iDataBuffer, iBufferLength),
                           bt,
                           iCurrentFrameLength,
                           GetVideoFrameType(iStartThumbIndex),
                           ts );

        iDecodePending = ETrue;
        if (!IsActive())
        {
            SetActive();
            iStatus = KRequestPending;
        }

        TRAPD( err, iDecoder->WriteCodedBufferL(iMediaBuffer) );
        if (err != KErrNone)
        {
            MNotifyThumbnailReady(err);
        }

        TUint freeInputBuffers = iDecoder->GetNumInputFreeBuffers();

        // HARI AVC hack is there a better way of doing this ??
        if (iDataFormat == EDataAVC)
        {
            if (freeInputBuffers != 0)
            {
                // activate object to end processing
                TRequestStatus *status = &iStatus;
                User::RequestComplete(status, KErrNone);
            }
        }

        return;
    }
    else
    {
        MNotifyThumbnailReady(KErrCorrupt);
        return;
    }
}

void CTNEProcessorImpl::MSendEncodedBuffer()
{
    DecodeThumb(EFalse);
}


void CTNEProcessorImpl::MPictureFromDecoder(TVideoPicture* aPicture)
{
    TInt yuvLength = iVideoParameters.iWidth*iVideoParameters.iHeight;
    yuvLength += (yuvLength >> 1);

    // Indicate that the decoded frame has been received
    iThumbFrameDecoded = ETrue;

    // copy to iFrameBuffer
    Mem::Copy(iFrameBuffer, aPicture->iData.iRawData->Ptr(), yuvLength);

    // release picture
    TInt error = KErrNone;
    TRAP( error, iDecoder->ReturnPicture(aPicture) );
    if ( error != KErrNone )
    {
        MNotifyThumbnailReady(error);
        return;
    }

    //VDASSERT(iDecodePending, 33);
    // complete request
    TRequestStatus *status = &iStatus;
    User::RequestComplete(status, KErrNone);

    return;
}

void CTNEProcessorImpl::MReturnCodedBuffer(TVideoBuffer* /*aBuffer*/)
{
    // Don't have to do anything here
    return;
}

// ---------------------------------------------------------
// CTNEProcessorImpl::CreateAndInitializeDecoderL
// Create and initialize decoder
// (other items were commented in a header).
// ---------------------------------------------------------
//
void CTNEProcessorImpl::CreateAndInitializeDecoderL()
{
    PRINT((_L("CTNEProcessorImpl::CreateAndInitializeDecoderL() begin")));
    TTRVideoFormat videoInputFormat;

    // parse the mime type: get the codec level, max bitrate etc.
    ParseMimeTypeL();

    // Create decoder wrapper object
    iDecoder = CTNEDecoderWrap::NewL(this);

    // Check to see if this mime type is supported
    if ( !(iDecoder->SupportsCodec(iMimeType, iShortMimeType)) )
    {
        User::Leave(KErrNotSupported);
    }

    videoInputFormat.iSize = TSize(iVideoParameters.iWidth, iVideoParameters.iHeight);
    videoInputFormat.iDataType = ETRDuCodedPicture;

    // set the codec params
    iDecoder->SetDecoderParametersL(iCodecLevel, videoInputFormat);

    iDecoderInitPending = ETrue;
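    // Complete our own request so that RunL() runs once the scheduler gets control;
    // iDecoderInitPending routes that pass to the post-initialization handling
    // (decoder StartL() and the first DecodeThumb() call).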

    // Activate the processor object
    if (!IsActive())
    {
        SetActive();

        TRequestStatus* status = &iStatus;
        User::RequestComplete(status, KErrNone);
    }

    // initialize the decoder
    iDecoder->InitializeL();
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::ParseMimeTypeL
// Parses given MIME type
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::ParseMimeTypeL()
{
    TUint maxBitRate = 0;
    TInt codecType = 0;
    TBuf8<256> shortMimeType;
    TBuf8<256> newMimeType;
    TInt width = iVideoParameters.iWidth;
    TUint codecLevel = 0;
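    // The codec level (and with it the maximum bit rate and the MIME type string)
    // is inferred from the picture width alone: QCIF or smaller selects the lowest
    // level, while larger widths (CIF, QVGA, VGA, PAL) select progressively higher ones.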

    if ( iDataFormat == EDataH263 )
    {
        // H.263
        codecType = EH263;
        shortMimeType = _L8("video/H263-2000");
        newMimeType = shortMimeType;

        switch ( width )
        {
            case KTRSubQCIFWidth:
            case KTRQCIFWidth:
            {
                // Set defaults for level=10
                maxBitRate = KTRMaxBitRateH263Level10;
                codecLevel = KTRH263CodecLevel10;
                newMimeType += _L8("; level=10");
                break;
            }

            case KTRCIFWidth:
            {
                // Set defaults for level=30
                maxBitRate = KTRMaxBitRateH263Level30;
                codecLevel = KTRH263CodecLevel30;
                newMimeType += _L8("; level=30");
                break;
            }

            case KTRPALWidth:
            {
                // Set defaults for level=60
                maxBitRate = KTRMaxBitRateH263Level60;
                codecLevel = KTRH263CodecLevel60;
                newMimeType += _L8("; level=60");
                break;
            }

            default:
            {
                // Set defaults for level=10
                maxBitRate = KTRMaxBitRateH263Level10;
                codecLevel = KTRH263CodecLevel10;
                newMimeType += _L8("; level=10");
                break;
            }
        }
    }
    else if ( iDataFormat == EDataMPEG4 )
    {
        // MPEG-4 Visual
        codecType = EMpeg4;
        shortMimeType = _L8("video/mp4v-es"); // Set short mime
        newMimeType = shortMimeType;

        switch ( width )
        {
            case KTRSubQCIFWidth:
            case KTRQCIFWidth:
            {
                // Set defaults for level 0 (profile-level-id=8)
                codecLevel = KTRMPEG4CodecLevel0;
                maxBitRate = KTRMaxBitRateMPEG4Level0;
                newMimeType += _L8("; profile-level-id=8");
                break;
            }

            case KTRQVGAWidth:
            case KTRCIFWidth:
            {
                // Set profile-level-id=3
                maxBitRate = KTRMaxBitRateMPEG4Level3;
                codecLevel = KTRMPEG4CodecLevel3;
                newMimeType += _L8("; profile-level-id=3");
                break;
            }

            case KTRVGAWidth:
            {
                // Set profile-level-id=4 (4a)
                maxBitRate = KTRMaxBitRateMPEG4Level4a;
                codecLevel = KTRMPEG4CodecLevel4a;
                newMimeType += _L8("; profile-level-id=4");
                break;
            }

            default:
            {
                // Set defaults for level 0 (profile-level-id=8)
                maxBitRate = KTRMaxBitRateMPEG4Level0;
                codecLevel = KTRMPEG4CodecLevel0;
                newMimeType += _L8("; profile-level-id=8");
                break;
            }
        }
    }
    else if (iDataFormat == EDataAVC)
    {
        // @@ YHK this is a hack for AVC, fix it later....
        // @@ YHK Imp *****************

        codecType = EH264;
        shortMimeType = _L8("video/H264"); // Set short mime
        newMimeType = shortMimeType;

        codecLevel = KTRMPEG4CodecLevel0;
        maxBitRate = KTRMaxBitRateMPEG4Level0;
        newMimeType += _L8("; profile-level-id=428014");

    }
    else
    {
        PRINT((_L("CTNEProcessorImpl::ParseMimeTypeL(), there is currently no support for this type")))
        User::Leave(KErrNotSupported);
    }

    // Mime type was set for the input format
    iCodecLevel = codecLevel;
    iCodec = codecType;
    iMaxBitRate = maxBitRate;

    iMimeType = newMimeType;
    iShortMimeType = shortMimeType;

}

// ---------------------------------------------------------
// CTNEProcessorImpl::GetFrameL
// Reads the first video frame of the seeked position and
// prepares it for decoding
// (other items were commented in a header).
// ---------------------------------------------------------
//
TInt CTNEProcessorImpl::GetFrameL()
{
    // Read the video frame into the buffer
    TInt error = ReadVideoFrame();

    // seek to and decode first frame
    if (!ReadAndUpdateFrame())
        User::Leave(KErrCorrupt);

    return KErrNone;
}



// ---------------------------------------------------------
// CTNEProcessorImpl::ReadVideoFrame
// Reads the next video frame from the parser into iDataBuffer
// (other items were commented in a header).
// ---------------------------------------------------------
//
TInt CTNEProcessorImpl::ReadVideoFrame()
{
    TUint frameLen;
    TFrameType frameType = EFrameTypeVideo;
    TBool frameAvailable = 0;
    TPtr8 readDes(0,0);
    TUint32 numReadFrames = 0;
    TUint32 timeStamp;

    // Get the next frame information
    TInt error = iParser->GetNextFrameInformation(frameType,
                                                  frameLen,
                                                  frameAvailable);

    if (error != KErrNone)
        return error;

    VPASSERT(frameAvailable);

    while (iBufferLength < frameLen)
    {
        // New size is 3/2 of the old size, rounded up to the next
        // full kilobyte
        TUint newSize = (3 * iBufferLength) / 2;
        newSize = (newSize + 1023) & (~1023);
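        // e.g. 8192 grows to 12288 and 12288 to 18432; both happen to be multiples
        // of 1024 already, otherwise the masking above rounds the size up to the
        // next full kilobyte.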

        TUint8* tmp = (TUint8*) User::ReAlloc(iDataBuffer, newSize);
        if (!tmp)
        {
            return KErrNoMemory;
        }

        iDataBuffer = tmp;
        iBufferLength = newSize;
    }

    iDataLength = frameLen;


    if (iBufferLength < iDataLength)
    {
        // would need to allocate a bigger buffer
        User::Panic(_L("CVideoPlayer"), EInvalidInternalState);
    }


    // @@ YHK clean up: use iDataBuffer directly instead of *p
    // make space for timestamp
    TUint8 *p = iDataBuffer;
    readDes.Set(p, 0, TInt(frameLen));


    // @@ YHK check frameType, do we need to send it ??
    // is the parser smart enough to get the video frame
    // type or do we need to skip the audio frame ???

    // @@ YHK test this scenario with the Nth frame
    // and how do we read the frame from the stream

    // read frame(s) from parser
    error = iParser->ReadFrames(readDes, frameType,
                                numReadFrames, timeStamp);

    if ( error != KErrNone )
        return error;

    VPASSERT( numReadFrames > 0 );

    // @@ YHK We don't need the TS, it's not used anywhere
    // put timestamp in the output block before the actual frame data
    // Mem::Copy(iDataBuffer, &timeStamp, 4);

    // set the frame length back to zero
    // frameLen = 0;

    return KErrNone;
}


// ---------------------------------------------------------
// CTNEProcessorImpl::ReadAndUpdateFrame
// Reads the encoded frame and updates frame information
// (other items were commented in a header).
// ---------------------------------------------------------
//
TInt CTNEProcessorImpl::ReadAndUpdateFrame()
{
    // Determine data format if needed
    if ( iDataFormat == EDataUnknown )
    {
        // OK, we have 4 bytes of data. Check if the buffer starts with a
        // H.263 PSC:

        if (iParser->iStreamParameters.iVideoFormat == CParser::EVideoFormatAVCProfileBaseline)
        {
            iDataFormat = EDataAVC;
        }
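        // An H.263 picture start code is the 22-bit pattern 0000 0000 0000 0000 1000 00:
        // two zero bytes followed by a byte whose six most significant bits are 100000,
        // hence the (iDataBuffer[2] & 0xfc) == 0x80 test below.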
        else if ( (iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) &&
                  ((iDataBuffer[2] & 0xfc) == 0x80) )
        {
            // Yes, this is a H.263 stream
            iDataFormat = EDataH263;
        }

        // It should be MPEG-4, check if it starts with an MPEG-4 Visual
        // Object Sequence start code, Visual Object start code, Video
        // Object start code, or Video Object Layer start code
        else if ( ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb0)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb6)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb3)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && (iDataBuffer[3] == 0xb5)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && ((iDataBuffer[3] >> 5) == 0)) ||
                  ((iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && (iDataBuffer[2] == 1) && ((iDataBuffer[3] >> 4) == 2)) )
        {
            iDataFormat = EDataMPEG4;
        }
        else
        {
            PRINT((_L("CTNEProcessorImpl::ReadAndUpdateFrame() - no PSC or MPEG-4 start code in the start of the buffer")));
            return EFalse;
        }
    }

    // Determine the start code length
    TUint startCodeLength = 0;
    switch (iDataFormat)
    {
        case EDataH263:
            startCodeLength = KH263StartCodeLength;
            break;
        case EDataMPEG4:
            startCodeLength = KMPEG4StartCodeLength;
            break;
        case EDataAVC:
            break;

        default:
            User::Panic(_L("CVideoPlayer"), EInvalidInternalState);
    }

    // If the stream has ended and we have no blocks and no data for even a
    // picture start code, we can't get a frame
    if ( iDataFormat == EDataH263 )
    {
        if ( (iCurrentFrameLength <= startCodeLength) && (iDataLength <= startCodeLength) )
            return EFalse;
    }
    else
    {
        if ( (iCurrentFrameLength <= startCodeLength) && (iDataLength < startCodeLength) )
            return EFalse;
    }

    // When reading H.263, the buffer always starts with the PSC of the
    // current frame
    if (iDataFormat == EDataH263)
    {

        // There should be one PSC at the buffer start, and no other PSCs up to
        // iDataLength
        if ( (iDataLength >= KH263StartCodeLength) &&
             ((iDataBuffer[0] != 0) || (iDataBuffer[1] != 0) || ((iDataBuffer[2] & 0xfc) != 0x80)) )
        {
            PRINT((_L("CTNEProcessorImpl::ReadAndUpdateFrame() - no PSC in the start of the buffer")))
            return EFalse;
        }
        if (iCurrentFrameLength < KH263StartCodeLength )
            iCurrentFrameLength = KH263StartCodeLength;

        TBool gotPSC = EFalse;
        while (!gotPSC)
        {
            // If we don't have a block at the moment, get one and check if it
            // has a new PSC

            // If we are at the start of a block, check if it begins with a PSC
            if ( (iDataLength > 2) &&
                 ( (iDataBuffer[0] == 0) && (iDataBuffer[1] == 0) && ((iDataBuffer[2] & 0xfc) == 0x80) ) )
            {
                gotPSC = ETrue;
                iCurrentFrameLength = iDataLength;
            }
            else
            {
                PRINT((_L("CTNEProcessorImpl::ReadAndUpdateFrame() - no PSC in the start of the buffer")))
                return EFalse;
            }
        }
        return ETrue;
    }
    else if (iDataFormat == EDataMPEG4)
    {
        // MPEG-4

        // check for VOS end code
        if ( (iDataBuffer[0] == 0 ) && (iDataBuffer[1] == 0 ) &&
             (iDataBuffer[2] == 0x01) && (iDataBuffer[3] == 0xb1) )
            return EFalse;

        // insert a VOP start code at the end, the decoder needs it
        iDataBuffer[iDataLength++] = 0;
        iDataBuffer[iDataLength++] = 0;
        iDataBuffer[iDataLength++] = 0x01;
        iDataBuffer[iDataLength++] = 0xb6;
        iCurrentFrameLength = iDataLength;

        // we have a complete frame
        return ETrue;
    }
    else
    {
        // Allocate buffer
        // @@ YHK need to come up with some decent value other than 100
        TUint8* tmpPtr = (TUint8*) User::AllocL(iDataLength + 100);
        TInt dLen = 0;
        TInt skip = 0;
        TUint32 nalSize = 0;
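        // AVC: the 3GP file stores each access unit as length-prefixed NAL units, and
        // the buffer for the first frame additionally starts with an
        // AVCDecoderConfigurationRecord carrying the SPS/PPS. The code below rewrites
        // the parameter sets and every NAL unit with 00 00 00 01 start codes
        // (Annex B byte-stream format) before handing the data to the decoder.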

        if (iFirstFrameOfClip)
        {
            // Set the flag to false.
            iFirstFrameOfClip = EFalse;

            ///////////////////////////////////////////////////////////////////
            /*
            AVC Decoder Configuration
            -------------------------
            aligned(8) class AVCDecoderConfigurationRecord {
                unsigned int(8) configurationVersion = 1;
                unsigned int(8) AVCProfileIndication;
                unsigned int(8) profile_compatibility;
                unsigned int(8) AVCLevelIndication;
                bit(6) reserved = '111111'b;
                unsigned int(2) lengthSizeMinusOne;
                bit(3) reserved = '111'b;
                unsigned int(5) numOfSequenceParameterSets;
                for (i=0; i< numOfSequenceParameterSets; i++) {
                    unsigned int(16) sequenceParameterSetLength;
                    bit(8*sequenceParameterSetLength) sequenceParameterSetNALUnit;
                }
                unsigned int(8) numOfPictureParameterSets;
                for (i=0; i< numOfPictureParameterSets; i++) {
                    unsigned int(16) pictureParameterSetLength;
                    bit(8*pictureParameterSetLength) pictureParameterSetNALUnit;
                }
            }
            */
            //////////////////////////////////////////////////////////////////////

            // Copy the first 4 bytes for config version, profile indication,
            // profile compatibility and AVC level indication
            Mem::Copy(&tmpPtr[dLen], iDataBuffer, 4);
            dLen += 4;
            skip += 4;

            // copy 1 byte for bit(6) reserved = '111111'b;
            // unsigned int(2) lengthSizeMinusOne;
            Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, 1);
            dLen += 1;

            iNalUnitBytes = (0x3 & iDataBuffer[skip]) + 1;
            skip += 1;

            // SPS (sequence parameter set) NAL units
            Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, 1);
            dLen += 1;

            TInt numOfSSP = 0x1F & iDataBuffer[skip];
            skip += 1;

            for (TInt i = 0; i < numOfSSP; i++)
            {
                TInt sspSize = iDataBuffer[skip]*256 + iDataBuffer[skip+1];
                skip += 2;

                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0x01;

                Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, sspSize);

                skip += sspSize;
                dLen += sspSize;
            }

            // PPS (picture parameter set) NAL units
            Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, 1);
            dLen += 1;

            TInt numOfPSP = iDataBuffer[skip];
            skip += 1;

            for (TInt i = 0; i < numOfPSP; i++)
            {
                TInt pspSize = iDataBuffer[skip]*256 + iDataBuffer[skip+1];
                skip += 2;

                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0;
                tmpPtr[dLen++] = 0x01;

                Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, pspSize);

                skip += pspSize;
                dLen += pspSize;
            }
        }

        while (skip < iDataLength)
        {
            switch (iNalUnitBytes)
            {
                case 1:
                    nalSize = iDataBuffer[skip];
                    break;
                case 2:
                    nalSize = iDataBuffer[skip]*256 + iDataBuffer[skip+1];
                    break;
                case 4:
                    // 32-bit big-endian NAL unit length
                    nalSize = (TUint32(iDataBuffer[skip]) << 24) | (TUint32(iDataBuffer[skip+1]) << 16) |
                              (TUint32(iDataBuffer[skip+2]) << 8) | iDataBuffer[skip+3];
                    break;
            }

            skip += iNalUnitBytes;

            tmpPtr[dLen++] = 0;
            tmpPtr[dLen++] = 0;
            tmpPtr[dLen++] = 0;
            tmpPtr[dLen++] = 0x01;

            Mem::Copy(&tmpPtr[dLen], iDataBuffer + skip, nalSize);

            skip += nalSize;
            dLen += nalSize;
        }

        Mem::Copy(iDataBuffer, tmpPtr, dLen);
        iDataLength = dLen;
        iCurrentFrameLength = iDataLength;

        // tmpPtr was allocated with User::AllocL, so release it with User::Free
        User::Free(tmpPtr);

        // we have a complete frame
        return ETrue;

    }

}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::MNotifyThumbnailReady
// Called by thumbnail generator when thumbnail is ready
// for retrieval
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::MNotifyThumbnailReady(TInt aError)
{
    // Handle any returned error
    if (HandleThumbnailError(aError))
        return;
    TInt bytesPerPixel = 0;
    TInt error;

    if ( !iRgbBuf )
    {
        TSize inputFrameResolution(iParser->iStreamParameters.iVideoWidth, iParser->iStreamParameters.iVideoHeight);

        // rgb specs
        TUint thumbLength = inputFrameResolution.iWidth * inputFrameResolution.iHeight;
        TUint thumbUVLength = thumbLength>>2;

        // VPASSERT(iYuvBuf);
        // assign yuv pointers
        TUint8* yBuf = iFrameBuffer;
        TUint8* uBuf = yBuf + thumbLength;
        TUint8* vBuf = uBuf + thumbUVLength;

        // check validity of thumbnail and associated operation
        if (iThumbEnhance) // for saving to file
        {
            if (iThumbDisplayMode == ENone)          // if no preference
                iThumbDisplayMode = EColor16M;       // 24-bit color image for enhancement
            else if (iThumbDisplayMode != EColor16M) // invalid combination
            {
                HandleThumbnailError(KErrNotSupported);
                return;
            }
        }
        else // for screen display
        {
            if (iThumbDisplayMode == ENone)    // if no preference
                iThumbDisplayMode = EColor64K; // 16-bit image
        }

        // determine proper bit depth for the bitmap
        if (iThumbDisplayMode == EColor16M)
            bytesPerPixel = 3; // 24-bit rgb takes 3 bytes, stored as bbbbbbbb gggggggg rrrrrrrr
        else if (iThumbDisplayMode == EColor64K || iThumbDisplayMode == EColor4K)
            bytesPerPixel = 2; // 16- and 12-bit rgb take 2 bytes, 12-bit stored as ggggbbbb xxxxrrrr
        else
        {
            HandleThumbnailError(KErrNotSupported);
            return; // support for 12-, 16- and 24-bit color images only
        }

        // create output rgb buffer
        TRAP(error, iRgbBuf = (TUint8*) User::AllocL(thumbLength * bytesPerPixel));
        if (HandleThumbnailError(error))
            return;

        TInt scanLineLength;

        // convert yuv to rgb
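        // Pick the YUV-to-RGB converter that matches the requested bit depth:
        // CYuv2Rgb12 for EColor4K, CYuv2Rgb16 for EColor64K (also the default)
        // and CYuv2Rgb24 for EColor16M.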
        switch (iThumbDisplayMode)
        {

            case EColor4K:
            {
                TInt error;
                CYuv2Rgb12* yuvConverter;
                TRAP(error, yuvConverter = new(ELeave) CYuv2Rgb12);
                if (HandleThumbnailError(error))
                    return;
                scanLineLength = inputFrameResolution.iWidth * bytesPerPixel;
                VPASSERT(yuvConverter);
                TRAP(error, yuvConverter->ConstructL(inputFrameResolution.iWidth, inputFrameResolution.iHeight, inputFrameResolution.iWidth, inputFrameResolution.iHeight));
                if (HandleThumbnailError(error))
                    return;
                yuvConverter->Convert(yBuf, uBuf, vBuf, inputFrameResolution.iWidth, inputFrameResolution.iHeight, iRgbBuf, scanLineLength);
                delete yuvConverter;
                yuvConverter = 0;
            }
            break;

            default:
            case EColor64K:
            {
                TInt error;
                CYuv2Rgb16* yuvConverter;
                TRAP(error, yuvConverter = new(ELeave) CYuv2Rgb16);
                if (HandleThumbnailError(error))
                    return;
                scanLineLength = inputFrameResolution.iWidth * bytesPerPixel;
                VPASSERT(yuvConverter);
                TRAP(error, yuvConverter->ConstructL(inputFrameResolution.iWidth, inputFrameResolution.iHeight, inputFrameResolution.iWidth, inputFrameResolution.iHeight));
                if (HandleThumbnailError(error))
                    return;
                yuvConverter->Convert(yBuf, uBuf, vBuf, inputFrameResolution.iWidth, inputFrameResolution.iHeight, iRgbBuf, scanLineLength);
                delete yuvConverter;
                yuvConverter = 0;
            }
            break;

            case EColor16M:
            {
                TInt error;
                CYuv2Rgb24* yuvConverter;
                TRAP(error, yuvConverter = new(ELeave) CYuv2Rgb24);
                if (HandleThumbnailError(error))
                    return;
                scanLineLength = inputFrameResolution.iWidth * bytesPerPixel;
                VPASSERT(yuvConverter);
                TRAP(error, yuvConverter->ConstructL(inputFrameResolution.iWidth, inputFrameResolution.iHeight, inputFrameResolution.iWidth, inputFrameResolution.iHeight));
                if (HandleThumbnailError(error))
                    return;
                yuvConverter->Convert(yBuf, uBuf, vBuf, inputFrameResolution.iWidth, inputFrameResolution.iHeight, iRgbBuf, scanLineLength);
                delete yuvConverter;
                yuvConverter = 0;
            }
            break;
        }
    }

    if (!iThumbEnhance)
    {
        TSize inputFrameResolution(iParser->iStreamParameters.iVideoWidth, iParser->iStreamParameters.iVideoHeight);

        /* Pre-calculate pixel indices for horizontal scaling. */
        // inputFrameResolution is the resolution of the image read from the video clip.
        // iOutputThumbResolution is the final resolution desired by the caller.
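        // The scaler is a nearest-neighbour DDA: for every output pixel the decision
        // variable is advanced by inWidth*outWidth and the source index is stepped one
        // pixel each time the variable exceeds outWidth*outWidth, so the inner copy
        // loop needs no divisions. The same scheme is applied vertically below.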

        TInt xIncrement = inputFrameResolution.iWidth * iOutputThumbResolution.iWidth;
        TInt xBoundary = iOutputThumbResolution.iWidth * iOutputThumbResolution.iWidth;

        TInt* xIndices = 0;
        TRAPD(xIndicesErr, xIndices = new (ELeave) TInt[iOutputThumbResolution.iWidth]);
        if (xIndicesErr == KErrNone)
        {
            TInt xDecision = xIncrement / bytesPerPixel; // looks like they changed here - orig was /2
            TInt sourceIndex = 0;
            for (TInt x = 0; x < iOutputThumbResolution.iWidth; x++)
            {
                while (xDecision > xBoundary)
                {
                    xDecision -= xBoundary;
                    sourceIndex += bytesPerPixel;
                }

                xIndices[x] = sourceIndex;
                xDecision += xIncrement;
            }
        }
        else
        {
            HandleThumbnailError(xIndicesErr);
            return;
        }

        /* Initialize bitmap. */
        TRAPD(bitmapErr, iOutBitmap = new (ELeave) CFbsBitmap);
        if ((xIndicesErr == KErrNone) && (bitmapErr == KErrNone))
        {
            bitmapErr = iOutBitmap->Create(iOutputThumbResolution, iThumbDisplayMode/*EColor64K*/);
            if (bitmapErr == KErrNone)
            {
                // Lock the heap to prevent the FBS server from invalidating the address
                iOutBitmap->LockHeap();

                /* Scale to the desired iOutputThumbResolution and copy to the bitmap. */
                TUint8* dataAddress = (TUint8*)iOutBitmap->DataAddress(); // fix

                TInt yIncrement = inputFrameResolution.iHeight * iOutputThumbResolution.iHeight;
                TInt yBoundary = iOutputThumbResolution.iHeight * iOutputThumbResolution.iHeight;

                TInt targetIndex = 0;
                TInt sourceRowIndex = 0;
                TInt yDecision = yIncrement / 2;
                for (TInt y = 0; y < iOutputThumbResolution.iHeight; y++)
                {
                    while (yDecision > yBoundary)
                    {
                        yDecision -= yBoundary;
                        sourceRowIndex += (inputFrameResolution.iWidth * bytesPerPixel);
                    }
                    yDecision += yIncrement;


                    for (TInt x = 0; x < iOutputThumbResolution.iWidth; x++)
                    {
                        for (TInt i = 0; i < bytesPerPixel; ++i)
                        {
                            const TInt firstPixelSourceIndex = sourceRowIndex + xIndices[x] + i;
                            dataAddress[targetIndex] = iRgbBuf[firstPixelSourceIndex];
                            targetIndex++;
                        }
                    }
                }
                iOutBitmap->UnlockHeap();
            }

            else
            {
                delete iOutBitmap; iOutBitmap = 0;
                HandleThumbnailError(bitmapErr);
                return;
            }
        }
        else
        {
            HandleThumbnailError(bitmapErr);
            delete[] xIndices; xIndices = 0;
            return;
        }

        delete[] xIndices;
        xIndices = 0;
    }
    else // enhance
    {
        TInt i, j;
        // create input bitmap and buffer
        CFbsBitmap* inBitmap = 0;
        TRAPD(inBitmapErr, inBitmap = new (ELeave) CFbsBitmap);
        if ( inBitmapErr == KErrNone )
        {
            // create bitmaps
            TSize originalResolution(iParser->iStreamParameters.iVideoWidth, iParser->iStreamParameters.iVideoHeight);
            inBitmapErr = inBitmap->Create(originalResolution, iThumbDisplayMode/*EColor16M*/);

            if ( inBitmapErr == KErrNone )
            {
                // fill the image from the rgb buffer into the input bitmap buffer
                TPtr8 linePtr(0,0);
                TInt lineLength = inBitmap->ScanLineLength(originalResolution.iWidth, iThumbDisplayMode);
                for (j=0, i=0; j<originalResolution.iHeight; j++, i+=lineLength)
                {
                    linePtr.Set(iRgbBuf+i, lineLength, lineLength);
                    inBitmap->SetScanLine((TDes8&)linePtr, j);
                }

                // create output bitmap
                TRAPD(outBitmapErr, iOutBitmap = new (ELeave) CFbsBitmap);
                if ( outBitmapErr == KErrNone )
                {
                    outBitmapErr = iOutBitmap->Create(iOutputThumbResolution, iThumbDisplayMode/*EColor16M*/); // same size as input frame

                    if ( outBitmapErr == KErrNone )
                    {
                        // post-processing enhancement
                        TRAP(outBitmapErr, EnhanceThumbnailL((const CFbsBitmap*)inBitmap, (CFbsBitmap*)iOutBitmap));

                    }
                    else
                    {
                        delete inBitmap; inBitmap = 0;
                        delete iOutBitmap; iOutBitmap = 0;
                        HandleThumbnailError(outBitmapErr);
                        return;
                    }
                }
                else
                {
                    delete inBitmap; inBitmap = 0;
                    HandleThumbnailError(outBitmapErr);
                    return;
                }
            }
            else
            {
                delete inBitmap; inBitmap = 0;
                HandleThumbnailError(inBitmapErr);
                return;
            }

            // delete input bitmap
            delete inBitmap;
            inBitmap = 0;
        }
        else
        {
            HandleThumbnailError(inBitmapErr);
            return;
        }
    }

    delete iRgbBuf;
    iRgbBuf = 0;

    // Handle video decoder deletion. If the decoder has been used,
    // it has to be reset before deletion
    if (iDecoder)
    {
        iDecoder->StopL();

        delete iDecoder;
        iDecoder = 0;
    }

    VPASSERT(iThumbnailRequestStatus);
    User::RequestComplete(iThumbnailRequestStatus, KErrNone);
    iThumbnailRequestStatus = 0;

    PRINT((_L("CTNEProcessorImpl::MNotifyThumbnailReady() end")))
}


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::HandleThumbnailError
// Handles an error in thumbnail generation
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TBool CTNEProcessorImpl::HandleThumbnailError(TInt aError)
{
    if (aError != KErrNone)
    {
        VPASSERT(iThumbnailRequestStatus);
        User::RequestComplete(iThumbnailRequestStatus, aError);
        iThumbnailRequestStatus = 0;
        return ETrue;
    }
    return EFalse;
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::FetchThumb
// Returns a pointer to the completed thumbnail bitmap
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::FetchThumb(CFbsBitmap*& aThumb)
{
    aThumb = iOutBitmap;
    iOutBitmap = 0;

    iState = EStateReadyToProcess;
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::InitializeClipL
// Initializes the processor for processing a clip
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::InitializeClipL(RFile& aFileHandle)
{

    PRINT((_L("CTNEProcessorImpl::InitializeClipL() begin")));

    iFirstFrameOfClip = ETrue;
    iFirstFrameFlagSet = EFalse;

    if (!iParser)
    {
        iParser = (CMP4Parser*) CMP4Parser::NewL(this, aFileHandle);
    }

    iParser->iFirstTimeClipParsing = ETrue;
    iState = EStateIdle;

    // open file & parse header
    CTNEProcessorImpl::TFileFormat format = CTNEProcessorImpl::EDataAutoDetect;

    User::LeaveIfError(OpenStream(aFileHandle, format));

    if (iHaveVideo == EFalse)
        User::Leave(KErrNotFound);

    VPASSERT(iState == EStateOpened);

    iState = EStatePreparing;

    // open demux & decoder
    User::LeaveIfError(Prepare());

    VPASSERT(iState == EStateReadyToProcess);

    PRINT((_L("CTNEProcessorImpl::InitializeClipL() end")))
}

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::ParseHeaderOnlyL
// Parses the header for a given clip
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::ParseHeaderOnlyL(CParser::TStreamParameters& aStreamParams, RFile& aFileHandle)
{

    if (!iParser)
    {
        // create an instance of the parser
        iParser = (CMP4Parser*) CMP4Parser::NewL(this, aFileHandle);
    }
    iParser->ParseHeaderL(aStreamParams);

    // update output parameters
    UpdateStreamParameters(iParser->iStreamParameters, aStreamParams);
}



// -----------------------------------------------------------------------------
// CTNEProcessorImpl::OpenStream
// Opens a clip for processing
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt CTNEProcessorImpl::OpenStream(RFile& aFileHandle, TFileFormat aFileFormat)
{
    // We can only open streams in the idle state
    if (iState != EStateIdle)
        return EInvalidProcessorState;

    TInt error = KErrNone;
    iFileFormat = aFileFormat;

    // set descriptor to read buffer
    TPtr8 readDes(0,0);
    readDes.Set(iReadBuf, 0, KReadBufInitSize);

    // read data from the file
    if ( (error = aFileHandle.Read(readDes)) != KErrNone )
        return error;

    if ( readDes.Length() < 8 )
        return KErrGeneral;


    // detect if the format is 3GP: bytes 5-8 == "ftyp"
    // This method is not 100 % foolproof, but good enough
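    // 0x66 0x74 0x79 0x70 is "ftyp" in ASCII; in the ISO base media file format the
    // type of the first box occupies bytes 4-7, which identifies 3GP/MP4 files.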
|
1591 if ( (iReadBuf[4] == 0x66) && (iReadBuf[5] == 0x74) && |
|
1592 (iReadBuf[6] == 0x79) && (iReadBuf[7] == 0x70) ) |
|
1593 { |
|
1594 iFileFormat = EData3GP; |
|
1595 iMuxType = EMux3GP; |
|
1596 } |
|
1597 else |
|
1598 return KErrNotSupported; |
|
1599 |
|
1600 // parse 3GP header |
|
1601 CMP4Parser *parser = 0; |
|
1602 if ( !iParser ) |
|
1603 { |
|
1604 TRAP(error, (parser = CMP4Parser::NewL(this, aFileHandle)) ); |
|
1605 if (error != KErrNone) |
|
1606 return error; |
|
1607 iParser = parser; |
|
1608 } |
|
1609 else |
|
1610 parser = (CMP4Parser*)iParser; |
|
1611 |
|
1612 TRAP(error, ParseHeaderL()); |
|
1613 |
|
1614 if (error != KErrNone) |
|
1615 return error; |
|
1616 |
|
1617 iState = EStateOpened; |
|
1618 |
|
1619 return KErrNone; |
|
1620 } |
|
1621 |
|
1622 // ----------------------------------------------------------------------------- |
|
1623 // CTNEProcessorImpl::CloseStream |
|
1624 // Closes the processed stream from parser |
|
1625 // (other items were commented in a header). |
|
1626 // ----------------------------------------------------------------------------- |
|
1627 // |
|
1628 TInt CTNEProcessorImpl::CloseStream() |
|
1629 { |
|
1630 |
|
1631 PRINT((_L("CTNEProcessorImpl::CloseStream() begin - iState = %d"), iState)) |
|
1632 |
|
1633 if ( (iState != EStateOpened) && (iState != EStateProcessing) ) |
|
1634 return EInvalidProcessorState; |
|
1635 |
|
1636 TInt error=0; |
|
1637 |
|
1638 // delete parser |
|
1639 if (iParser) |
|
1640 { |
|
1641 TRAP(error, |
|
1642 { |
|
1643 delete iParser; |
|
1644 iParser=0; |
|
1645 } |
|
1646 ); |
|
1647 if (error != KErrNone) |
|
1648 return error; |
|
1649 } |
|
1650 |
|
1651 // We are idle again |
|
1652 iState = EStateIdle; |
|
1653 |
|
1654 PRINT((_L("CTNEProcessorImpl::CloseStream() end "))) |
|
1655 |
|
1656 return KErrNone; |
|
1657 } |
|
1658 |
|
1659 |
|
1660 // ----------------------------------------------------------------------------- |
|
1661 // CTNEProcessorImpl::Prepare |
|
1662 // Prepares the processor for processing, opens demux & decoder |
|
1663 // (other items were commented in a header). |
|
1664 // ----------------------------------------------------------------------------- |
|
1665 // |
|
1666 TInt CTNEProcessorImpl::Prepare() |
|
1667 { |
|
1668 // We can only prepare from preparing state |
|
1669 if (iState != EStatePreparing) |
|
1670 return EInvalidProcessorState; |
|
1671 |
|
1672 // Make sure we now know the stream format |
|
1673 if (iFileFormat == EDataAutoDetect) |
|
1674 return EUnsupportedFormat; |
|
1675 |
|
1676 // Check whether the stream has audio, video or both, and whether it is |
|
1677 // muxed |
|
1678 switch (iFileFormat) |
|
1679 { |
|
1680 case EData3GP: |
|
1681 // the video and audio flags are set when |
|
1682 // the header is parsed. |
|
1683 iIsMuxed = ETrue; |
|
1684 break; |
|
1685 default: |
|
1686 User::Panic(_L("CTNEProcessorImpl"), EInvalidInternalState); |
|
1687 } |
|
1688 |
|
1689 iState = EStateReadyToProcess; |
|
1690 |
|
1691 return KErrNone; |
|
1692 } |
|
1693 |
|
1694 // ----------------------------------------------------------------------------- |
|
1695 // CTNEProcessorImpl::DoCloseVideoL |
|
1696 // Closes & deletes the structures used in processing |
|
1697 // (other items were commented in a header). |
|
1698 // ----------------------------------------------------------------------------- |
|
1699 // |
|
1700 void CTNEProcessorImpl::DoCloseVideoL() |
|
1701 { |
|
1702 if ((iState == EStateProcessing) || (iState == EStateReadyToProcess)|| |
|
1703 (iState == EStatePreparing) ) |
|
1704 { |
|
1705 PRINT((_L("CTNEProcessorImpl::DoCloseVideoL() - stopping"))) |
|
1706 User::LeaveIfError(Stop()); |
|
1707 iState = EStateOpened; |
|
1708 } |
|
1709 |
|
1710 // If we are buffering or opening at the moment or clip is open then close it |
|
1711 if ( (iState == EStateOpened) || (iState == EStateReadyToProcess)) |
|
1712 { |
|
1713 PRINT((_L("CTNEProcessorImpl::DoCloseVideoL() - closing stream"))) |
|
1714 User::LeaveIfError(CloseStream()); |
|
1715 iState = EStateIdle; |
|
1716 } |
|
1717 } |
|


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::Stop
// Stops processing & closes modules used in processing
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt CTNEProcessorImpl::Stop()
    {
    iDecoding = EFalse;

    // Check state; we may also get here from the middle of a Prepare() attempt
    if ( (iState != EStateProcessing) && (iState != EStateReadyToProcess) && (iState != EStatePreparing) )
        return EInvalidProcessorState;

    PRINT((_L("CTNEProcessorImpl::Stop() begin")))

    // Handle video decoder deletion. If the decoder has been used,
    // it has to be stopped before deleting.
    if (iDecoder)
        {
        // StopL() may leave, but Stop() must not, so trap the error here;
        // the decoder is deleted next regardless of the result
        TRAPD(stopError, iDecoder->StopL());
        if (stopError != KErrNone)
            {
            PRINT((_L("CTNEProcessorImpl::Stop() - decoder StopL error %d"), stopError))
            }

        delete iDecoder;
        iDecoder = 0;
        }

    iState = EStateOpened;

    PRINT((_L("CTNEProcessorImpl::Stop() end")))

    return KErrNone;
    }


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::Close
// Stops processing and closes all submodules except status monitor
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt CTNEProcessorImpl::Close()
    {
    // delete all objects except status monitor
    TRAPD(error, DoCloseVideoL());
    if (error != KErrNone)
        return error;

    iState = EStateIdle;

    return KErrNone;
    }


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::RunL
// Called by the active scheduler when the video decoder initialization is done
// or a frame decode request has completed
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::RunL()
    {
    PRINT((_L("CTNEProcessorImpl::RunL() begin, iDecoding %d, iDecoderInitPending %d, iDecodePending %d"),
          iDecoding, iDecoderInitPending, iDecodePending ))

    // If decoding has already been stopped there is nothing to do;
    // just keep our internal request pending.
    // (Note: the iDecoding flag may be redundant here.)
    if (!iDecoding)
        {
        if (!IsActive())
            {
            iStatus = KRequestPending;
            SetActive();
            }
        PRINT((_L("CTNEProcessorImpl::RunL() out from !iDecoding branch")))
        return;
        }

    if (iDecoderInitPending)
        {
        iDecoderInitPending = EFalse;
        if (iStatus != KErrNone)
            {
            MNotifyThumbnailReady(iStatus.Int());
            return;
            }
        // at this point we have already read a frame,
        // so now start processing
        iDecoder->StartL();

        // stop if a fatal error has occurred in starting
        // the transcoder (decoding stopped in MtroFatalError)
        if (!iDecoding)
            return;

        DecodeThumb(ETrue);
        return;
        }

    if (iDecodePending)
        {
        iDecodePending = EFalse;

        DecodeThumb(EFalse);
        return;
        }
    }
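
// Note: RunL above is a small dispatcher for the asynchronous phases of
// thumbnail generation; the sketch below assumes the request is completed
// by the decoder wrapper behind iDecoder:
//   1) decoder initialisation completes -> iDecoderInitPending is cleared,
//      the decoder is started and DecodeThumb(ETrue) is called;
//   2) a decode request completes       -> iDecodePending is cleared and
//      DecodeThumb(EFalse) is called for the next attempt.
// The ETrue/EFalse argument is presumed to distinguish the first decoded
// frame from subsequent ones.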


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::RunError
// Called by the active object framework when the RunL method leaves
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt CTNEProcessorImpl::RunError(TInt aError)
    {
    return aError;
    }


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::DoCancel
// Cancels any pending asynchronous requests
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::DoCancel()
    {
    PRINT((_L("CTNEProcessorImpl::DoCancel() begin")))

    // Cancel our internal request
    if ( iStatus == KRequestPending )
        {
        PRINT((_L("CTNEProcessorImpl::DoCancel() cancel request")))
        TRequestStatus *status = &iStatus;
        User::RequestComplete(status, KErrCancel);
        }

    PRINT((_L("CTNEProcessorImpl::DoCancel() end")))
    }


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::SetHeaderDefaults
// Sets appropriate default values for the video processing parameters
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::SetHeaderDefaults()
    {
    // set suitable default values
    iHaveVideo = ETrue;
    iVideoType = EVideoH263Profile0Level10;

    iVideoParameters.iWidth = 0;
    iVideoParameters.iHeight = 0;
    iVideoParameters.iIntraFrequency = 0;
    iVideoParameters.iNumScalabilityLayers = 0;
    iVideoParameters.iReferencePicturesNeeded = 0;
    // picture period in nanoseconds
    iVideoParameters.iPicturePeriodNsec = TInt64(33366667);

    iStreamLength = 0;
    iStreamSize = 0;
    iStreamBitrate = 10000;
    }
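
// Note: the default picture period of 33366667 ns set above corresponds to
// roughly 29.97 frames per second (30000/1001 fps, i.e. 1e9 * 1001 / 30000 ns
// per picture); the zero width/height and the bitrate of 10000 (presumably
// bits per second) are placeholder values until real header data is available.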


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::ParseHeaderL
// Parses the clip header & sets internal variables accordingly
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::ParseHeaderL()
    {
    VPASSERT(iParser);

    if ( iMuxType != EMux3GP )
        User::Leave(EUnsupportedFormat);

    CParser::TStreamParameters streamParams;

    // parse
    iParser->ParseHeaderL(streamParams);

    // copy input stream info into parser
    UpdateStreamParameters(iParser->iStreamParameters, streamParams);

    // copy parameters
    iHaveVideo = streamParams.iHaveVideo;
    iVideoType = (TVideoType)streamParams.iVideoFormat;
    iCanSeek = streamParams.iCanSeek;
    iVideoParameters.iWidth = streamParams.iVideoWidth;
    iVideoParameters.iHeight = streamParams.iVideoHeight;
    iVideoParameters.iIntraFrequency = streamParams.iVideoIntraFrequency;
    iVideoParameters.iNumScalabilityLayers = streamParams.iNumScalabilityLayers;
    iVideoParameters.iReferencePicturesNeeded = streamParams.iReferencePicturesNeeded;
    iVideoParameters.iPicturePeriodNsec = streamParams.iVideoPicturePeriodNsec;

    iStreamLength = streamParams.iStreamLength;
    iStreamBitrate = streamParams.iStreamBitrate;
    iStreamSize = streamParams.iStreamSize;

    // Ensure that the video isn't too large
    if ( (iVideoParameters.iWidth > KTNEMaxVideoWidth) ||
         (iVideoParameters.iHeight > KTNEMaxVideoHeight) )
        User::Leave(EVideoTooLarge);
    }

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::UpdateStreamParameters
// Copies stream parameters to destination structure
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::UpdateStreamParameters(CParser::TStreamParameters& aDestParameters,
                                               CParser::TStreamParameters& aSrcParameters)
    {
    aDestParameters.iHaveVideo = aSrcParameters.iHaveVideo;
    aDestParameters.iVideoFormat = aSrcParameters.iVideoFormat;
    aDestParameters.iVideoWidth = aSrcParameters.iVideoWidth;
    aDestParameters.iVideoHeight = aSrcParameters.iVideoHeight;
    aDestParameters.iVideoPicturePeriodNsec = aSrcParameters.iVideoPicturePeriodNsec;
    aDestParameters.iVideoIntraFrequency = aSrcParameters.iVideoIntraFrequency;
    aDestParameters.iStreamLength = aSrcParameters.iStreamLength;
    aDestParameters.iVideoLength = aSrcParameters.iVideoLength;
    aDestParameters.iCanSeek = aSrcParameters.iCanSeek;
    aDestParameters.iStreamSize = aSrcParameters.iStreamSize;
    aDestParameters.iStreamBitrate = aSrcParameters.iStreamBitrate;
    aDestParameters.iMaxPacketSize = aSrcParameters.iMaxPacketSize;
    aDestParameters.iLogicalChannelNumberVideo = aSrcParameters.iLogicalChannelNumberVideo;
    aDestParameters.iReferencePicturesNeeded = aSrcParameters.iReferencePicturesNeeded;

    aDestParameters.iFrameRate = aSrcParameters.iFrameRate;
    }


// -----------------------------------------------------------------------------
// CTNEProcessorImpl::EnhanceThumbnailL
// Enhances the visual quality of the frame
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
void CTNEProcessorImpl::EnhanceThumbnailL(const CFbsBitmap* aInBitmap,
                                          CFbsBitmap* aTargetBitmap)
    {
    // create enhancement object
    if (!iEnhancer)
        iEnhancer = (CDisplayChain*) CDisplayChain::NewL();

    // enhance image
    iEnhancer->ProcessL(aInBitmap, aTargetBitmap);

    // clear enhancement object
    delete iEnhancer;
    iEnhancer = 0;
    }

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::CheckFrameQuality
// Checks if a frame has "good" or "legible" quality
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt CTNEProcessorImpl::CheckFrameQuality(TUint8* aYUVDataPtr)
    {
    TInt i;
    TInt minValue = 255;
    TInt maxValue = 0;
    TInt goodFrame = 1;
    TInt runningSum = 0;
    TInt averageValue = 0;
    TInt pixelSkips = 4;
    TInt numberOfSamples = 0;
    TInt minMaxDeltaThreshold = 20;
    TInt extremeRegionThreshold = 20;
    TInt ySize = iVideoParameters.iWidth * iVideoParameters.iHeight;

    // gather image statistics from the luminance (Y) plane
    for (i = 0, numberOfSamples = 0; i < ySize; i += pixelSkips, aYUVDataPtr += pixelSkips, numberOfSamples++)
        {
        runningSum += *aYUVDataPtr;
        if (*aYUVDataPtr > maxValue)
            maxValue = *aYUVDataPtr;
        if (*aYUVDataPtr < minValue)
            minValue = *aYUVDataPtr;
        }

    VDASSERT(numberOfSamples, 10);
    averageValue = runningSum / numberOfSamples;

    // make the decision based on the statistics: reject the frame if the
    // luminance range is too narrow or the average is too close to either extreme
    if ((maxValue - minValue) < minMaxDeltaThreshold)
        goodFrame = 0;
    else
        {
        if (averageValue < (minValue + extremeRegionThreshold) ||
            averageValue > (maxValue - extremeRegionThreshold))
            goodFrame = 0;
        }
    return goodFrame;
    }
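
// Worked example of the quality heuristic above (illustrative values only):
// if the sampled luminance values span minValue = 16 .. maxValue = 235 with
// averageValue = 120, the spread (219) exceeds minMaxDeltaThreshold (20) and
// the average stays more than extremeRegionThreshold (20) away from both
// extremes, so the frame is reported as good. A nearly black frame with
// minValue = 0 and maxValue = 12 fails the spread test, and a frame whose
// average lies within 20 of either extreme (e.g. heavily over- or
// under-exposed content) is rejected as well.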

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::GetVideoClipFrameRate
// Gets the video frame rate of the current clip
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TReal CTNEProcessorImpl::GetVideoClipFrameRate()
    {
    VPASSERT(iParser);

    TReal rate = 0.0;
    iParser->GetVideoFrameRate(rate);

    return rate;
    }

// -----------------------------------------------------------------------------
// CTNEProcessorImpl::GetVideoTimeInMsFromTicks
// Converts a video timestamp from ticks to milliseconds
// (other items were commented in a header).
// -----------------------------------------------------------------------------
//
TInt64 CTNEProcessorImpl::GetVideoTimeInMsFromTicks(TInt64 aTimeStampInTicks, TBool /*aCommonTimeScale*/) const
    {
    // convert using the video track time scale reported by the parser
    TUint timeScale = iParser->iStreamParameters.iVideoTimeScale;
    VPASSERT(timeScale > 0);
    return TInt64( I64REAL(aTimeStampInTicks) / (TReal)timeScale * 1000 + 0.5 );
    }
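
// Worked example (assumed values, not from a real clip): with a video time
// scale of 30000 ticks per second, a timestamp of 45045 ticks converts to
// 45045 / 30000 * 1000 + 0.5 = 1502.0, i.e. 1502 ms; the + 0.5 term rounds
// to the nearest millisecond before the TInt64 truncation.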

//=============================================================================


// End of File