/*
 * VideoWrapper.cpp
 *
 * Copyright (C) Alberto Vigata - January 2000  ultraflask@yahoo.com
 *
 * This file is part of FlasKMPEG, a free MPEG to MPEG/AVI converter
 *
 * FlasKMPEG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * FlasKMPEG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FlasKMPEG; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "VideoWrapper.h"
#include "..\Subpic\Subpic.h"
#include "..\cpusupport.h"
#include "..\flrandom.h"
#include "debug.h"

extern "C" {
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>
#define GLOBAL
#include "config.h"
#include "global.h"
//#include "getbits.h"
}

#include "misc_tables.h"

//#define TRACK_FRAMEINFO

// Fully parenthesized so the macro is safe for compound expressions
#define ABS(x) (((x) > 0) ? (x) : -(x))

const i64 nJumpBackwards = 102400;

// Global variables for syncing with the decoding thread
VideoWrapper *myVideo;
int  gopPOS;
bool closedGOP;
bool synced = false;

// This function returns the position of the stream in offset bytes
// from the buffer. Defined in getbits.cpp.
extern int Get_Buffer_Pos();
extern void Reset_Bits_Buffer();
extern unsigned int Set_Buffer_State( ui8 *buffer, ui32 offset, ui32 buffersize );
extern void picture_data();

/* private prototypes */
static int video_sequence _ANSI_ARGS_((int *framenum));
static int Decode_Bitstream _ANSI_ARGS_((void));
static void Process_Options _ANSI_ARGS_((int argc, char *argv[]));
static void InitClip();
static void DeInitClip();

// Scans forward for the next sequence header (0x000001B3).
// Returns false if a sequence end code (0x000001B7) shows up first.
bool NextSequenceHeader()
{
  ui32 nStartCode = Show_Bits(32);
  while( nStartCode!=0x000001B3 && nStartCode!=0x000001B7 )
  {
    Flush_Buffer(8);
    nStartCode = Show_Bits(32);
  }
  return nStartCode==0x000001B3;
}

VideoWrapper::VideoWrapper()
{
  time  = 0;
  error = 0;
  pictureWidth  = 0;
  pictureHeight = 0;
  myVideo = this;
  stopDecoding = false;
  m_nLastDecodedPictureStart = 0;
  m_bSequenceInitialized = false;
  Idct      = NULL;
  m_hModule = NULL;
}

VideoWrapper::~VideoWrapper()
{
  if(m_bSequenceInitialized)
  {
    DeInitClip();
    DeinitializeSequence();
  }
  if(m_hModule)
    FreeLibrary( m_hModule );
}

// Loads an iDCT plugin DLL and registers every iDCT it exposes that
// the current CPU can actually run.
void VideoWrapper::LoadIdctModule( char *szTemp )
{
  FMpegIdct_PluginFunc iDctPlugFunc;
  FMpegIdctInfo        iDctInfo;
  fmGetPluginFuncPtr   getPluginFunc;

  long nSupportedExt     = CPUGetSupportedExtensions();
  long nSupportedExtIdct = 0;

  if( m_hModule = LoadLibrary( szTemp ) )
  {
    if( getPluginFunc = (fmGetPluginFuncPtr)GetProcAddress( m_hModule, "fmGetPluginFunc" ) )
    {
      getPluginFunc( &iDctPlugFunc );
      if( iDctPlugFunc.GetIdct && iDctPlugFunc.GetIdctCount )
      {
        for( int i=0; i<iDctPlugFunc.GetIdctCount(); i++ )
        {
          iDctPlugFunc.GetIdct( i, &iDctInfo );

          // Check that the CPU supports this iDCT:
          // adapt the flags from CPU support to the iDCT API conventions
          nSupportedExtIdct  = 0;
          nSupportedExtIdct |= nSupportedExt&CPU_SUPPORTS_MMX       ? SUPPORTS_MMX       : 0;
          nSupportedExtIdct |= nSupportedExt&CPU_SUPPORTS_SSE       ? SUPPORTS_SSE       : 0;
          nSupportedExtIdct |= nSupportedExt&CPU_SUPPORTS_SSE2      ? SUPPORTS_SSE2      : 0;
          nSupportedExtIdct |= nSupportedExt&CPU_SUPPORTS_3DNOW     ? SUPPORTS_3DNOW     : 0;
          nSupportedExtIdct |= nSupportedExt&CPU_SUPPORTS_3DNOW_EXT ? SUPPORTS_3DNOW_EXT : 0;
          nSupportedExtIdct &= PROCESSOR_SUPPORT_MASK;

          // If all the CPU features the iDCT needs are supported...
          if( (iDctInfo.dwFlags&PROCESSOR_SUPPORT_MASK) & nSupportedExtIdct )
            m_vIdct.AddItem( &iDctInfo );
          // ...or if no special CPU features are required, add it as well
          else if( !(iDctInfo.dwFlags&PROCESSOR_SUPPORT_MASK) )
            m_vIdct.AddItem( &iDctInfo );
        }
      }
    }
  }
}

// Times every registered iDCT over random data and selects the fastest.
bool VideoWrapper::IdctSpeedTest()
{
  FMpegIdctInfo iDctInfo;
  i64  nStartTime, nEndTime;
  i64  nMinTime    = MAXLONGLONG;
  bool bTimerError = false;
  int  i, k;

  FlRandom *pRandGen = new FlRandom;

  CFrame frTestFrame(NULL);
  // This will allocate a chunk of memory aligned
  // to 16-byte boundaries.
  frTestFrame.Set( 8, 8*250, FRAME_RGB32 );

  // Get the buffer
  short *shortbuf = (short *)frTestFrame.GetBuffer();

  // Randomize the blocks
  for(k=0; k<(8*8*250); k++)
    shortbuf[k] = (short)pRandGen->Random(0, 2048);

  for(i=0; i<m_vIdct.GetCount(); i++)
  {
    iDctInfo = m_vIdct[i];
    void (* idct)(short *block) = iDctInfo.Idct;

    // Perform the speed test
    if(iDctInfo.InitIdct)
      iDctInfo.InitIdct();

    short *block = shortbuf;
    QueryPerformanceCounter( (LARGE_INTEGER *)&nStartTime );
    for(int j=0; j<250; j++)
    {
      for(k=0; k<64; k++)
        idct(block);
      block += 64;
    }
    QueryPerformanceCounter( (LARGE_INTEGER *)&nEndTime );

    if(iDctInfo.DeInitIdct)
      iDctInfo.DeInitIdct();

    if(nEndTime==0)
    {
      bTimerError = true;
      break;
    }

    i64 nTestTime = nEndTime - nStartTime;
    if( nTestTime < nMinTime )
    {
      nMinTime = nTestTime;
      SelectIdct(i);
    }
  }

  // If we couldn't use QueryPerformanceCounter,
  // look for an iDCT that supports MMX.
  if(bTimerError)
  {
    for(i=0; i<m_vIdct.GetCount(); i++)
    {
      iDctInfo = m_vIdct[i];
      if(iDctInfo.dwFlags&SUPPORTS_MMX)
      {
        SelectIdct(i);
        break;
      }
    }
    if( i>=m_vIdct.GetCount() )
      SelectIdct(0);
  }

  delete pRandGen;
  return true;
}

bool VideoWrapper::LoadIDCTs()
{
  WIN32_FIND_DATA find_data;
  char   directory[MAX_PATH], szTemp[MAX_PATH];
  HANDLE search_handle;

  sprintf(directory, "%s\\*.idct.flask", program_directory );
  m_vIdct.EmptyArray();

  search_handle = FindFirstFile(directory, &find_data);
  if(search_handle==INVALID_HANDLE_VALUE)
  {
    m_vIdct.EmptyArray();
  }
  else
  {
    sprintf(szTemp, "%s\\%s", program_directory, find_data.cFileName );
    LoadIdctModule( szTemp );
    while( FindNextFile(search_handle, &find_data) )
    {
      sprintf(szTemp, "%s\\%s", program_directory, find_data.cFileName );
      LoadIdctModule( szTemp );
    }
    FindClose(search_handle);
  }

  IdctSpeedTest();
  return m_vIdct.GetCount() > 0;
}

int VideoWrapper::Init(TVideoInit *pInitParams)
{
  int ret;

  strcpy( this->program_directory, pInitParams->ProgramDirectory );

  // Try to load iDCTs from plugin files
  if(!LoadIDCTs())
    return 0;

  VideoWrapper::streamID           = pInitParams->nStreamId;
  VideoWrapper::subStreamID        = pInitParams->nSubStreamId;
  VideoWrapper::subpic_streamID    = pInitParams->nSubpicStreamId;
  VideoWrapper::subpic_substreamID = pInitParams->nSubpicSubstreamId;

  // Start the demuxer
  if(!SetInput(pInitParams->pMismInfo))
    return 0;

  /* Create the clipping vector */
  InitClip();

  // MSSG decoder initialization
  ld = &base;            /* select base context */
  Frame_Store_Flag = 1;  /* store full frames */
  Output_Type = 2;       /* YUV format output */
  System_Stream_Flag = 0;

  FlushBuffer();
  m_pTempVideoBuffer = NULL;

  // GET SEQUENCE PROPERTIES
  CDemux::SetStreamPos(0);
  StartReadLPES();
  Initialize_Buffer();

  if(!NextSequenceHeader())
    return 0;

  /* Get_Hdr() returns 0 at end of sequence, or 1 once a picture
     header has been parsed */
  ret = Get_Hdr();
  if(ret==1)
  {
    // VIDEO SEQUENCE....
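    // The sequence header has just been parsed, so InitializeSequence()
    // can derive the coded picture size and allocate the three
    // reference frames from it.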
    Sequence_Framenum = 0;
    InitializeSequence();
    pictureWidth  = Coded_Picture_Width;
    pictureHeight = Coded_Picture_Height;
    m_bSequenceInitialized = true;
  }
  else
    return 0;

  // Now, guess if this is 24 fps
  detectedFrameRateCode = Is24Progressive() ? 1 : frame_rate_code;
  updateVideoStats();

  // Rewind the stream
  CDemux::SetStreamPos(0);
  StartReadLPES();
  Initialize_Buffer();
  synced = true;

  // Store the CLUT
  pClut = pInitParams->clut;

  return 1;
}

void VideoWrapper::FlushBuffer()
{
  m_pTempVideoBuffer     = NULL;
  m_nTempVideoBufferPtr  = 0;
  m_nTempVideoBufferSize = 0;
  m_nPesPosState = PesNotDefined;
  Reset_Bits_Buffer();
}

int VideoWrapper::Start(TVideoOptions *opt)
{
  m_bFronFrameIsReady = false;
  m_pFrontFrame = m_pBackFrame = NULL;
  m_pFrameBuffer = opt->pFrameBuffer;

/*
  // Output temporary YUV frame for adaptation to output
  switch( m_nOutFormat )
  {
  case FRAME_RGB32:
    m_oYuvFrame.Set( pictureWidth, pictureHeight, FRAME_YUV444 );
    m_oUtilFrame.Set( pictureWidth, pictureHeight, FRAME_RGB32 );
    m_oYuvFrame.SetBuffer( (ui8*)&sYuvFrame );
    break;
  case FRAME_YV12:
    m_oYuvFrame.Set( pictureWidth, pictureHeight, FRAME_YUV420 );
    m_oUtilFrame.Set( pictureWidth, pictureHeight, FRAME_YV12 );
    m_oYuvFrame.SetBuffer( (ui8*)&sYuvFrame );
    break;
  }
*/

  m_bSyncedNotEnoughReferences = false;
  m_bFirstPictureDecoded   = false;
  m_bPreviousDirWasForward = false;
  time = 0;

  /* IDCT */
  SelectIdct( opt->idctIndex );
  if( myVideo->InitIdct )
    myVideo->InitIdct();

  m_bTopFieldExpected = true;

  // Decoder configuration
  timeCode.frame  = 0;
  timeCode.hour   = 0;
  timeCode.minute = 0;
  timeCode.second = 0;
  stopDecoding = false;

  // Only allow the recons_progressive flag if the frame rate is 29.97 or 30
  if( frame_rate_code == 4 || frame_rate_code == 5 )
    recons_progressive = opt->recons_progressive;
  else
    recons_progressive = false;

  memset(&p, 0, sizeof(presTimes));
  internalPTS = 0;

  // Pulldown stuff
  pullDownDetected = false;
  pulldown_state = 3;
  interlaced_field_delay  = (__int64)(MPEG2_CLK_REF*(frameDelay))>>1;
  progressive_frame_delay = (__int64)(MPEG2_CLK_REF*(1.25*frameDelay));

  Bitstream_Framenum = 0;
  sequencePos = SEQ_FIRSTTIME;

  if( opt->bStartInSync )
  {
    // During this start-stop sequence, only calls to GetFrame can be
    // performed. Timestamps will be valid since the sync point.
    // In the start part, we retrieve any references needed for
    // decoding the first frame.

    // We first set our position to the sync position
    CDemux::SetStreamPos( opt->nSyncPoint );

    // Flush the buffer
    FlushBuffer();

    // Now we decode the previous picture.
    // This will retrieve any references needed.
    // If there are not enough references available, inform about it
    // (likely we're at the beginning of the file).

    // First align the stream with the first picture to be decoded.
    int nParams = SEARCH_P | SEARCH_I | SEARCH_B | SEARCH_FORWARD | SEARCH_ALIGN_FIRST_FIELD;

    // This shouldn't fail
    SearchPictureHeader( nParams );

    // Now call DecodePictureEx, which will decode the previous
    // picture, retrieving any references needed.
    // Parse the start code first.
    ui32 dword;
    GetFordDWord(&dword);

    // Decode; with DECEX_FORCEDECODING set we accept the first
    // available picture even if its references are missing.
    DecodePictureEx(DECEX_PREV_PICTURE|DECEX_FORCEDECODING);

    // Reset the current timestamps because they are not valid at all
    p.forward.PTS = p.backward.PTS = 0;
    p.forward.SCR = p.backward.SCR = 0;
    memset(&myPES, 0, sizeof(PESinfo));

    // Now set the position back again to the sync point
    CDemux::SetStreamPos( opt->nSyncPoint );

    // Flag the synced decoding operation, i.e. only calls to GetFrame
    // can be performed.
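    // While m_bSyncedDecoding is set, GetFrame() stops handing out
    // frames once m_nLastDecodedPictureStart reaches m_nEndPoint below.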
    m_bSyncedDecoding = true;

    // Flush the buffer
    FlushBuffer();

    // Read linear PES init
    StartReadLPES();

    // Store the end point
    m_nEndPoint = opt->nEndPoint;
    if(m_nEndPoint <= opt->nSyncPoint)
      m_nEndPoint = GetStreamSize();

    // That should do it!
  }
  else
  {
    m_bSyncedDecoding = false;
  }

  // Subpic init
  if( subpic_streamID!=-1 )
    subpic_init(pClut);

  return 1;
}

inline i64 VideoWrapper::get_time(int adjust_clock)
{
  i64 present;
  i64 delay = (__int64)(MPEG2_CLK_REF*(frameDelay));
  ui32 half_delay = (int)delay>>1;

  if(p.image.PTS)
  {
#if DEBUG
    if( ABS(internalPTS - p.image.PTS) > 900000 )
      DBG_STR((str, "VideoWrapper::get_time() - Timestamp deviates from expected value by %I64d clock units or %I64d ms\n",
        internalPTS - p.image.PTS, (internalPTS - p.image.PTS)/27000));
#endif
    internalPTS = p.image.PTS;
    switch( adjust_clock )
    {
    case EQUAL: present = internalPTS; break;
    case PLUS:  present = internalPTS = internalPTS + half_delay; break;
    case MINUS: present = internalPTS = internalPTS - half_delay; break;
    }
    internalPTS += i64(recons_progressive ? (1.25*(double)delay) : delay);
  }
  else
  {
    present = internalPTS;
    internalPTS += i64(recons_progressive ? (1.25*(double)delay) : delay);
  }

  return present;
}

// CFrame GetFrame
// Frame source definition
bool VideoWrapper::GetFrame(CFrame **pFrame)
{
  int nVal;
  *pFrame = NULL;

  if( m_nLastDecodedPictureStart >= m_nEndPoint && m_bSyncedDecoding )
    return false;

  ui32 frame_delay             = (ui32)(frameDelay * 10000.0);
  ui32 progressive_frame_delay = (ui32)(1.25 * frameDelay * 10000.0);

  if(!m_pFrontFrame)
  {
    m_pFrontFrame = m_pFrameBuffer->GetFreeFrame();
    if(!m_pFrontFrame)
      return false;
  }
  if(!m_pBackFrame)
  {
    m_pBackFrame = m_pFrameBuffer->GetFreeFrame();
    if(!m_pBackFrame)
      return false;
  }

  // If we already have a frame, return it
  if(m_bFronFrameIsReady)
  {
    *pFrame = m_pFrontFrame;
    m_pFrontFrame = m_pBackFrame;
    m_pBackFrame = NULL;
    m_bFronFrameIsReady = false;
    goto EndGetFrame;
  }

  m_pFrontFrame->SetDuration( frame_delay );

  // Retrieve a frame
  nVal = get_frame();
  if(!nVal)
    return false;

  // Parse the frame structure
  if(progressive_sequence)
  {
    // Here the MPEG-2 standard allows outputting 1, 2 or 3 consecutive
    // frames for 60 fps progressive output; let's output just one.
    // This is the case for MPEG-1 too.
    m_pFrontFrame->SetFrame(m_pDecoded);
    m_pFrontFrame->SetPresTime( get_time(EQUAL) );
    m_pFrontFrame->SetFlags( FRAME_PROGRESSIVE );
    *pFrame = m_pFrontFrame;
    m_pFrontFrame = m_pBackFrame;
    m_pBackFrame = NULL;
  }
  else
  {
    if(p.image.progressive_frame)
    {
      if(recons_progressive)
      {
        m_pFrontFrame->SetFrame(m_pDecoded);
        m_pFrontFrame->SetPresTime( get_time(EQUAL) );
        m_pFrontFrame->SetFlags( FRAME_PROGRESSIVE );
        m_pFrontFrame->SetDuration( progressive_frame_delay );
        *pFrame = m_pFrontFrame;
        m_pFrontFrame = m_pBackFrame;
        m_pBackFrame = NULL;
        m_bTopFieldExpected = true;
      }
      else if(p.image.top_field_first)
      {
        m_pFrontFrame->SetFrame(m_pDecoded);
        m_pFrontFrame->SetPresTime( get_time(EQUAL) );
        m_pFrontFrame->SetFlags( FRAME_INTERLACED );
        if(p.image.repeat_first_field)
        {
          m_pBackFrame->SetField(m_pDecoded, true);
          m_pBackFrame->SetFlags( FRAME_TOPFIELD );
          m_bTopFieldExpected = false;
        }
        else
          m_bTopFieldExpected = true;
        *pFrame = m_pFrontFrame;
        m_pFrontFrame = m_pBackFrame;
        m_pBackFrame = NULL;
      }
      else // Bottom field first
      {
        m_pFrontFrame->SetField(m_pDecoded, false);
        m_pFrontFrame->SetPresTime( get_time(MINUS) );
        m_pFrontFrame->SetFlags( FRAME_INTERLACED );
        if(m_bTopFieldExpected)
          m_pFrontFrame->SetField(m_pDecoded, true);
        m_pBackFrame->SetField(m_pDecoded, true);
        if(p.image.repeat_first_field)
        {
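          // repeat_first_field on a bottom-field-first progressive
          // frame: reuse the bottom field as the third field of the
          // pulldown pattern, completing the back frame so the next
          // GetFrame() call can return it immediately.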
          m_pBackFrame->SetField(m_pDecoded, false);
          m_pBackFrame->SetPresTime( get_time(PLUS) );
          m_bFronFrameIsReady = true;
          m_bTopFieldExpected = true;
        }
        else
          m_bTopFieldExpected = false;
        *pFrame = m_pFrontFrame;
        m_pFrontFrame = m_pBackFrame;
        m_pBackFrame = NULL;
      }
    }
    else // Frame is interlaced
    {
      if( p.image.picture_structure == FRAME_PICTURE )
      {
        if(p.image.top_field_first)
        {
          m_pFrontFrame->SetFrame(m_pDecoded);
          m_pFrontFrame->SetPresTime( get_time(EQUAL) );
          m_pFrontFrame->SetFlags( FRAME_INTERLACED );
          *pFrame = m_pFrontFrame;
          m_pFrontFrame = m_pBackFrame;
          m_pBackFrame = NULL;
          m_bTopFieldExpected = true;
        }
        else // bottom field first
        {
          m_pFrontFrame->SetField(m_pDecoded, false);
          m_pFrontFrame->SetPresTime( get_time(MINUS) );
          m_pFrontFrame->SetFlags( FRAME_INTERLACED );
          if(m_bTopFieldExpected)
            m_pFrontFrame->SetField(m_pDecoded, true);
          m_pBackFrame->SetField(m_pDecoded, true);
          *pFrame = m_pFrontFrame;
          m_pFrontFrame = m_pBackFrame;
          m_pBackFrame = NULL;
          m_bTopFieldExpected = false;
        }
      }
      else // the frame is reconstructed from field pictures
      {
        if(p.image.top_field_first)
        {
          m_pFrontFrame->SetFrame(m_pDecoded);
          m_pFrontFrame->SetPresTime( get_time(EQUAL) );
          m_pFrontFrame->SetFlags( FRAME_INTERLACED );
          *pFrame = m_pFrontFrame;
          m_pFrontFrame = m_pBackFrame;
          m_pBackFrame = NULL;
          m_bTopFieldExpected = true;
        }
        else // bottom field first
        {
          m_pFrontFrame->SetField(m_pDecoded, false);
          m_pFrontFrame->SetPresTime( get_time(MINUS) );
          m_pFrontFrame->SetFlags( FRAME_INTERLACED );
          if(m_bTopFieldExpected)
            m_pFrontFrame->SetField(m_pDecoded, true);
          m_pBackFrame->SetField(m_pDecoded, true);
          *pFrame = m_pFrontFrame;
          m_pFrontFrame = m_pBackFrame;
          m_pBackFrame = NULL;
          m_bTopFieldExpected = false;
        }
      }
    }
  }

EndGetFrame:
  // Apply subpictures
  if( subpic_streamID!=-1 )
    subpic_apply(*pFrame, (double)(*pFrame)->GetPresTime()/27000.0);

  return true;
}

bool VideoWrapper::GetBackByte(ui8 *byte)
{
  bool bSuccess = true;
  PESinfo dummyPES;

  // The offset has to be within the buffer size
  if( m_nTempVideoBufferPtr <= (i32)m_nTempVideoBufferSize )
  {
    // Grab the previous PES if we're finished with this one
    // or we don't have any data
    if( m_nTempVideoBufferPtr <= 0 || !m_pTempVideoBuffer )
    {
      // The buffer has been underrun.
      // Retrieve the previous buffer. This assumes that the position in
      // the stream is exactly the byte after the latest PES.

      // If the file pointer is at the end of the PES,
      // rewind this PES first
      if( m_nPesPosState == PesEnd )
        bSuccess = RewindPreviousPES( streamID );

      // Rewind again to find the real PES
      bSuccess = RewindPreviousPES( streamID );

      ui64 PESpos = CDemux::GetStreamPos();
      if( bSuccess )
      {
        // Read the PES.
        // 0xAA is an invalid value for streamID.
        dummyPES.streamID = 0xAA;
        while( bSuccess && dummyPES.streamID!=streamID )
        {
          bSuccess = ReadPES( (unsigned char **)&m_pTempVideoBuffer, &dummyPES );
          if( bSuccess )
          {
            m_nTempVideoBufferSize = dummyPES.payloadSize;
            m_nTempVideoBufferPtr  = dummyPES.payloadSize;
            // Update these two variables to be able to
            // reposition the stream after this.
            m_nPesPosState    = PesBegin;
            m_nEndPesPosition = CDemux::GetStreamPos();
          }
        }
        // Because we want to emulate the going-backwards behaviour,
        // set the position to the beginning of the PES again.
        CDemux::SetStreamPos( PESpos );
      }
      else
        DBG_STR((str, "VideoWrapper::GetBackByte() - Couldn't rewind PES\n"));
    }

    // There are still bytes in this buffer to gather.
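    // Consume one byte moving backwards: predecrement the pointer
    // and hand back the byte it now points at.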
    if( bSuccess )
    {
      m_nTempVideoBufferPtr--;
      *byte = *(m_pTempVideoBuffer + m_nTempVideoBufferPtr);
    }
  }
  else
  {
    DBG_STR((str, "VideoWrapper::GetBackByte() - Buffer offset outside bounds\n"))
    bSuccess = false;
  }

  return bSuccess;
}

bool VideoWrapper::GetFordByte(ui8 *byte)
{
  bool bSuccess = true;
  PESinfo dummyPES;

  // The offset has to be less than the buffer size
  if( m_nTempVideoBufferPtr <= (i32)m_nTempVideoBufferSize )
  {
    // Grab the next PES if we're finished with this one
    if( m_nTempVideoBufferPtr >= (i32)m_nTempVideoBufferSize || !m_pTempVideoBuffer )
    {
      // Read the PES.
      // If the pointer is at the beginning of the PES we just read,
      // read it again to parse it.
      if( m_nPesPosState == PesBegin )
        bSuccess = ReadPES( (unsigned char **)&m_pTempVideoBuffer, &dummyPES );

      // 0xAA is an invalid value for streamID
      dummyPES.streamID = 0xAA;
      while( bSuccess && dummyPES.streamID!=streamID )
      {
        bSuccess = ReadPES( (unsigned char **)&m_pTempVideoBuffer, &dummyPES );
        if( bSuccess && dummyPES.streamID==streamID )
        {
          m_nPesPosState = PesEnd;
          m_nTempVideoBufferSize = dummyPES.payloadSize;
          m_nTempVideoBufferPtr  = 0;
        }
      }
    }

    // There are still bytes in this buffer to gather.
    if( bSuccess )
    {
      *byte = *(m_pTempVideoBuffer + m_nTempVideoBufferPtr);
      m_nTempVideoBufferPtr++;
    }
  }
  else
  {
    bSuccess = false;
  }

  return bSuccess;
}

bool VideoWrapper::GetFordDWord(ui32 *dword)
{
  ui8 byte;
  *dword = 0;

  bool bSuccess = GetFordByte( &byte );
  *dword = byte << 24;
  if( bSuccess ) bSuccess = GetFordByte( &byte );
  *dword |= byte << 16;
  if( bSuccess ) bSuccess = GetFordByte( &byte );
  *dword |= byte << 8;
  if( bSuccess ) bSuccess = GetFordByte( &byte );
  *dword |= byte;

  return bSuccess;
}

// Scans the video byte stream for a picture start code matching the
// picture types and direction requested in nParams. On success the
// stream is left aligned with the picture (with the first field of a
// frame when SEARCH_ALIGN_FIRST_FIELD is set) and nParams carries the
// picture type found back to the caller.
bool VideoWrapper::SearchPictureHeader( int &nParams )
{
  bool bSuccess = true;
  bool bFound   = false;
  i32  nParsedBytes;
  ui32 nPictureType;
  ui32 nPrevPictureType;
  ui32 nTemporalReference;
  ui32 nPrevTemporalReference;
  ui64 nFirstFieldPictureStartPos = 0;
  bool bLookingSecondField = false;
  ui16 nVBVDelay;
  ui8  nPictureStructure;
  ui8  byte;

  while( !bFound && bSuccess )
  {
    ui32 nStartCode = 0xFFFFFFFF;

    // Select the direction of the search
    if( nParams&SEARCH_BACKWARDS )
      while( nStartCode!=PICTURE_START_CODE && bSuccess )
      {
        bSuccess = GetBackByte(&byte);
        nStartCode = (nStartCode>>8) | (byte<<24);
      }
    else
      while( nStartCode!=PICTURE_START_CODE && bSuccess )
      {
        bSuccess = GetFordByte(&byte);
        nStartCode = (nStartCode<<8) | byte;
      }

    if( bSuccess )
    {
      // We found a picture header and we are aligned with it.
      // Only parse the start code if we were reading backwards.
      if( nParams&SEARCH_BACKWARDS )
        GetFordDWord(&nStartCode);

      GetFordByte(&byte);
      nTemporalReference = byte << 2;
      GetFordByte(&byte);
      nTemporalReference |= (byte&0xC0)>>6;
      nParsedBytes = 6;

      nPictureType = (byte&0x3F)>>3;

      // If we're not interested in this type, continue searching
#define FOUND_TYPE ((nPictureType==P_TYPE && nParams&SEARCH_P) || \
                    (nPictureType==B_TYPE && nParams&SEARCH_B) || \
                    (nPictureType==I_TYPE && nParams&SEARCH_I) )

#if 0
      char cTypes[] = {'X','I','P','B','X','X','X','X','X','X','X','X','X','X','X','X'};
      DBG_STR((str, "VideoWrapper::SearchPictureHeader - %c picture found at %d\n",
        cTypes[nPictureType], GetStreamPos()-6))
#endif

      if( nParams&SEARCH_ALIGN_FIRST_FIELD )
      {
        // Parse the rest of the header
        nVBVDelay = (byte&0x07)<<13;
        GetFordByte(&byte);
        nVBVDelay |= byte<<5;
        GetFordByte(&byte);
        nVBVDelay |= (byte&0xF8)>>3;
        nParsedBytes += 2;

        // Keep track of the bits remaining in 'byte'.
        // Now we have 3 bits remaining.
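        // From here on we also need the picture coding extension that
        // follows the header, so every byte parsed keeps being counted
        // in nParsedBytes so Rewind() can undo the whole read.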
        ui32 nBitsRemaining = 3;

#define Rewind(x) { for(int i=0; i<x; i++) GetBackByte(&byte); }

        // Find the next start code prefix
        ui32 nStartCode = 0xFFFFFF00;
        while( (nStartCode&0x00FFFFFF)!=1 && bSuccess )
        {
          bSuccess = GetFordByte(&byte);
          nParsedBytes += 1;
          nStartCode = (nStartCode<<8) | byte;
        }
        if(!bSuccess)
          continue;

        GetFordByte(&byte);
        nParsedBytes += 1;
        if(byte!=0xB5)
        {
          // Either this is MPEG-1 or something wrong happened
          goto validate;
        }

        GetFordByte(&byte);
        if((byte>>4)!=8)  // The extension_start_code_identifier is 8 for picture
          continue;       // something wrong happened
        nParsedBytes += 1;

        GetFordByte(&byte);  // parse some stuff
        GetFordByte(&byte);
        nParsedBytes += 2;
        nPictureStructure = byte&0x03;

        bool bJumpToPos = false;
        if(nPictureStructure==FRAME_PICTURE)
        {
          if(FOUND_TYPE)
          {
            bFound = true;
            SetSearchType( nParams, nPictureType );
          }
        }
        else // we have a field picture
        {
          if( !bLookingSecondField )
          {
            nFirstFieldPictureStartPos = GetStreamPos() - nParsedBytes;
            nPrevTemporalReference = nTemporalReference;
            nPrevPictureType = nPictureType;
            bLookingSecondField = true;
          }
          else
          {
            // When the temporal reference of these two field pictures
            // matches, the two of them form the frame picture; position
            // ourselves on the first one.
            if( nPrevTemporalReference==nTemporalReference )
            {
              // If we're going forward, it was the previous one
              if(nParams&SEARCH_FORWARD)
              {
                bJumpToPos = true;
                nPictureType = nPrevPictureType;
              }
            }
            else
            {
              if(nParams&SEARCH_BACKWARDS)
              {
                bJumpToPos = true;
                nPictureType = nPrevPictureType;
              }
            }

            Second_Field = 0;
            if(FOUND_TYPE)
            {
              SetSearchType( nParams, nPictureType );
              bFound = true;
            }
            bLookingSecondField = false;
          }
        }

        if(bFound || nParams&SEARCH_BACKWARDS)
        {
          if(bJumpToPos)
            SetStreamPos(nFirstFieldPictureStartPos);
          else
            Rewind(nParsedBytes);
        }
      }
      else
      {
validate:
        if(FOUND_TYPE)
        {
          bFound = true;
          SetSearchType( nParams, nPictureType );
        }
        if( bFound || nParams&SEARCH_BACKWARDS )
        {
          Rewind(nParsedBytes);
        }
      }
    }
  }

  return bSuccess;
}

// Hands the current PES buffer over to the MSSG bitstream reader.
bool VideoWrapper::EnterCoreContext()
{
  Set_Buffer_State( m_pTempVideoBuffer, m_nTempVideoBufferPtr, m_nTempVideoBufferSize );
  return true;
}

bool VideoWrapper::ExitCoreContext()
{
  m_nTempVideoBufferPtr = Get_Buffer_Pos();

  // If the pos is <0, the current position of the video buffer
  // belongs to a previous buffer. Rewind to it.
  if( m_nTempVideoBufferPtr<0 )
  {
    int nRewindBytes = -m_nTempVideoBufferPtr;
    // Set the beginning of the buffer
    m_nTempVideoBufferPtr = 0;
    // Rewind
    ui8 byte;
    for(int i=0; i<nRewindBytes; i++)
      GetBackByte(&byte);
  }
  return true;
}

////////////////////////////////////////////////////////////////////////////////
//
// Navigational retrieving methods. Timestamps are not valid within this context.
//
////////////////////////////////////////////////////////////////////////////////

void VideoWrapper::UpdateDecodedPtr()
{
  unsigned char *pDecoded;
  int i;

  // B pictures are decoded into the aux frame; otherwise the picture
  // just decoded lives in the forward reference frame.
  pDecoded = p.actual.picture_coding_type==B_TYPE ?
    auxframe[0] : forward_reference_frame[0];

  for(i=0; i<3; i++)
    if( pDecoded==(unsigned char *)m_frRef[i].GetBuffer() )
      break;

  m_pDecoded = &m_frRef[i];

  // Update the picture info
  p.image = p.actual.picture_coding_type==B_TYPE ? p.actual : p.forward;
}

//-------------------------------------------------------------------
// GetSingleFrame - pFrame should be copied and not used directly.
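// On a read error it sets error = END_OF_STREAM and clears *pFrame;
// otherwise *pFrame points at the decoder-owned reference frame.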
//-------------------------------------------------------------------
inline bool VideoWrapper::GetSingleFrame(CFrame **pFrame, int nFlags)
{
  DecodePictureEx(nFlags);
  UpdateDecodedPtr();

  if(m_bReadError)
    error = END_OF_STREAM;

  bool bSuccess = !m_bReadError;
  if(!bSuccess)
  {
    *pFrame = NULL;
    return false;
  }

  *pFrame = m_pDecoded;
  return true;
}

bool VideoWrapper::GetNextFrame( CFrame **pFrame )
{
  return GetSingleFrame(pFrame, DECEX_NEXT_PICTURE);
}

bool VideoWrapper::GetPrevFrame( CFrame **pFrame )
{
  return GetSingleFrame(pFrame, DECEX_PREV_PICTURE);
}

bool VideoWrapper::GetNextIFrame(CFrame **pFrame)
{
  return GetSingleFrame(pFrame, DECEX_NEXT_PICTURE|DECEX_JUST_IPICTURE);
}

bool VideoWrapper::GetPrevIFrame(CFrame **pFrame)
{
  return GetSingleFrame(pFrame, DECEX_PREV_PICTURE|DECEX_JUST_IPICTURE);
}

int VideoWrapper::DecodePictureEx( int nDecExParams )
{
  int nRetValue = 0;
  int nPictureTypes, nDecodingModes;
  int nParams;

  nPictureTypes = nDecExParams&DECEX_JUST_IPICTURE ?
    SEARCH_I : SEARCH_I | SEARCH_B | SEARCH_P;

  // If just an I picture was demanded, skip the reference search
  nDecodingModes = nDecExParams&DECEX_JUST_IPICTURE ?
    SKIP_REF_SEARCH|DECODING_PICTURE : DECODING_PICTURE;

  if( nDecExParams&DECEX_NEXT_PICTURE )
  {
    nParams = nPictureTypes | SEARCH_FORWARD | SEARCH_ALIGN_FIRST_FIELD;
    if( SearchPictureHeader( nParams ) )
    {
      if( m_bFirstPictureDecoded )
      {
        nDecodingModes |= SKIP_REF_SEARCH;
        nRetValue = DecodePicture(nDecodingModes);
      }
      else
      {
        m_bFirstPictureDecoded = true;
        nRetValue = DecodePicture(nDecodingModes);
        // This is here for the case of an I frame, likely the first of
        // the stream. Decode the next picture; it should be a P.
        while( nRetValue==DEC_NOTENOUGHREF && !m_bReadError &&
               !(nDecExParams&DECEX_FORCEDECODING) )
          // this will return DEC_NOTENOUGHREF
          nRetValue = DecodePicture(nDecodingModes);
      }
      m_bPreviousDirWasForward = true;
    }
  }
  else
  {
    // Now rewind past the picture just decoded
    nParams = nPictureTypes | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
    SearchPictureHeader( nParams );

    nParams = nPictureTypes | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
    DBG_STR((str, "VideoWrapper::DecodePictureEx - back decoding starting at %d\n", GetStreamPos()))
    if( SearchPictureHeader( nParams ) )
    {
      m_bFirstPictureDecoded = false;
      nRetValue = DecodePicture(nDecodingModes);
      // This is here for the case of an I frame, likely the first of
      // the stream. Decode the next picture; it should be a P.
      while( nRetValue==DEC_NOTENOUGHREF && !m_bReadError &&
             !(nDecExParams&DECEX_FORCEDECODING) )
        // this will return DEC_NOTENOUGHREF
        nRetValue = DecodePicture(nDecodingModes);
    }
    //DBG_STR((str, "VideoWrapper::DecodePictureEx - back decoding stopping at %d\n", GetStreamPos()))
    m_bPreviousDirWasForward = false;
  }

  // If just an I picture was demanded, reposition the stream to the
  // frame after the next I or P picture, because DECEX_JUST_IPICTURE
  // makes the decoder output the I picture without references.
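  // (Presumably this keeps a following navigation call from aligning
  // to, and returning, the very same I picture again.)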
  if( nDecExParams&DECEX_JUST_IPICTURE )
  {
    // Search the next I or P
    nParams = SEARCH_P | SEARCH_I | SEARCH_FORWARD | SEARCH_ALIGN_FIRST_FIELD;
    SearchPictureHeader( nParams );

    // Get one byte to avoid aligning to the same I or P again
    ui8 byte;
    GetFordByte(&byte);

    // Align to the frame after it
    nParams = SEARCH_P | SEARCH_I | SEARCH_B | SEARCH_FORWARD | SEARCH_ALIGN_FIRST_FIELD;
    SearchPictureHeader( nParams );
  }

  return nRetValue;
}

int VideoWrapper::DecodePicture(int nDecParams)
{
  ui64 nPicturePos;
  int  nParams, cc;
  unsigned char *tmp;  /* temporary swap pointer */
  bool bSuccess;
  int  nReturn = DEC_OK;

  // Our goal is to decode the picture that is at this position.

  // Parse the picture header
  nParams = SEARCH_P | SEARCH_I | SEARCH_B | SEARCH_FORWARD | SEARCH_ALIGN_FIRST_FIELD;

  // If we are told NOT to search for references, skip this section.
  // This is flagged when we already have the correct references.
  if(!(nDecParams&SKIP_REF_SEARCH))
  {
    if( SearchPictureHeader( nParams ) )
    {
      // Retrieve the position of this picture
      nPicturePos = GetStreamPos();

      switch( GetSearchType( nParams ) )
      {
      case I_TYPE:
        // No reference pictures needed; the picture can be decoded right
        // away. However, the output will be the previous I or P, so
        // retrieve it, but only on the first call to DecodePicture,
        // i.e. when we're not inside a recursive call made to retrieve
        // references.
        if( nDecParams&DECODING_PICTURE )
        {
          nParams = SEARCH_I | SEARCH_P | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
          if( SearchPictureHeader( nParams ) )
            // We found the previous P or I picture. Decode it.
            nReturn = DecodePicture();
          else
            nReturn = DEC_NOTENOUGHREF;
        }
        break;

      case P_TYPE:
        // Backward reference needed. Rewind the stream to find the
        // previous I or P picture, decode it and put it up as the
        // reference, then come back to this place.
        nParams = SEARCH_I | SEARCH_P | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
        if( SearchPictureHeader( nParams ) )
        {
          // We found the previous P or I picture. Decode it.
          nReturn = DecodePicture();
        }
        else
          nReturn = DEC_NOTENOUGHREF;
        break;

      case B_TYPE:
      {
        // Both references needed: the two I/P pictures that precede
        // this B picture in coding order.
        ui64 nFirstRefPos = -1;
        bool bFirstRefOk = false, bSecondRefOk = false;

        // Search for the previous I or P
        nParams = SEARCH_P | SEARCH_I | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
        bSuccess = SearchPictureHeader( nParams );
        if(bSuccess)
        {
          nFirstRefPos = GetStreamPos();
          // Search for the previous I or P picture again
          nParams = SEARCH_P | SEARCH_I | SEARCH_BACKWARDS | SEARCH_ALIGN_FIRST_FIELD;
          bSuccess = SearchPictureHeader( nParams );
          if( bSuccess )
            bFirstRefOk = DecodePicture()!=DEC_NOTENOUGHREF;
        }

        // If we found the first reference, decode it
        if( nFirstRefPos != -1 )
        {
          // Set the stream to the first reference we found
          SetStreamPos( nFirstRefPos );
          // Decode it
          bSecondRefOk = DecodePicture()!=DEC_NOTENOUGHREF;
        }

        nReturn = ( bSuccess && bFirstRefOk && bSecondRefOk ) ? DEC_OK : DEC_NOTENOUGHREF;
        break;
      }
      }

      // Restore the position of the beginning of this picture
      SetStreamPos( nPicturePos );
    }
  }

  if(!Second_Field)
    m_nLastDecodedPictureStart = GetStreamPos();

  // Don't enter and exit the context if we are in continuous reading mode!
  if(!(nDecParams&CONTINOUS_READING))
    EnterCoreContext();

  // We should be aligned to the first field by now.
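  // A coded frame may arrive as two field pictures; after the first
  // field is decoded, Second_Field toggles and we jump back to
  // start_decode until the second field has been processed too.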
  Second_Field = 0;

start_decode:
  if(Get_Hdr())
  {
    if (picture_structure==FRAME_PICTURE && Second_Field)
    {
      DBG_STR((str, "VideoWrapper::DecodePicture - Odd number of field pictures\n"));
      Second_Field = 0;
    }

    if( !(nDecParams&DONT_UPDATE_REF) )
    {
      // DBG_STR((str, "VideoWrapper::DecodePicture - pic type %d sec field %d\n", picture_coding_type, Second_Field));

      // Update the picture buffers
      for (cc=0; cc<3; cc++)
      {
        /* B pictures do not need to be saved for future reference */
        if (picture_coding_type==B_TYPE)
        {
          current_frame[cc] = auxframe[cc];
        }
        else
        {
          /* only update at the beginning of the coded frame */
          if (!Second_Field)
          {
            tmp = forward_reference_frame[cc];

            /* the previously decoded reference frame is stored coincident
               with the location where the backward reference frame is
               stored (backwards prediction is not needed in P pictures) */
            forward_reference_frame[cc] = backward_reference_frame[cc];

            /* update pointer for potential future B pictures */
            backward_reference_frame[cc] = tmp;
          }

          /* we can erase over the old backward reference frame since it
             is not used in a P picture, and since any subsequent B
             pictures will use the previously decoded I or P frame as the
             backward_reference_frame */
          current_frame[cc] = backward_reference_frame[cc];
        }

        /* IMPLEMENTATION: one-time folding of a line offset into the
           pointer which stores the memory address of the current frame
           saves offsets and conditional branches throughout the remainder
           of the picture processing loop */
        if (picture_structure==BOTTOM_FIELD)
          current_frame[cc] += (cc==0) ? Coded_Picture_Width : Chroma_Width;
      }
    }

    // Finally decode the picture data
    picture_data();

    if (picture_structure!=FRAME_PICTURE)
      Second_Field = !Second_Field;

    if (picture_structure!=FRAME_PICTURE && Second_Field)
      goto start_decode;
  }
  else
  {
    DBG_STR((str, "VideoWrapper::DecodePicture - GetHdr() failed.\n"));
  }

  // Don't enter and exit the context if we are in continuous reading mode!
  if(!(nDecParams&CONTINOUS_READING))
    ExitCoreContext();

  return nReturn;
}

bool VideoWrapper::get_frame()
{
  updateVideoStats();

  time = (ui32)(double(GetTime())/(double)27000);  // milliseconds

  // In case there is a problem
  timeCode.hour   = hour;
  timeCode.minute = minute;
  timeCode.second = sec;
  timeCode.frame  = frame;

  if(stopDecoding)
  {
    error = PLAYER_STOPPED;
    return 0;
  }

  if(m_bSyncedDecoding)
  {
    int nDecodingModes = SKIP_REF_SEARCH|DECODING_PICTURE|CONTINOUS_READING;
    // Decode an extra picture if we don't have enough references
    DecodePicture(nDecodingModes);
  }
  else
  {
    DecodePictureEx(DECEX_NEXT_PICTURE);
  }

#ifdef TRACK_FRAMEINFO
  DBG_STR(( str, "Frame info: Type= %d, Int=%s, TS = %d\n",
    p.actual.picture_coding_type,
    p.actual.progressive_frame ? "Yes" : "No",
    p.actual.PTS / 300));
#endif

  UpdateDecodedPtr();

  if(m_bReadError)
    error = END_OF_STREAM;

  return !m_bReadError;
}

int VideoWrapper::Stop()
{
  if(m_pFrontFrame)
    m_pFrontFrame->Release();
  if(m_pBackFrame)
    m_pBackFrame->Release();

  time = 0;

  /* reset subpics */
  if( subpic_streamID!=-1 )
    subpic_free();

  /* IDCT */
  if( myVideo->DeInitIdct )
    myVideo->DeInitIdct();

  return 0;
}

void DeInitClip()
{
  free(Clip-384);
}
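/* The clip table is biased by +384 so it can be indexed directly with
   sample values in [-384, 639]; that is why DeInitClip() above frees
   (Clip - 384), the pointer malloc() actually returned. */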
void InitClip()
{
  int i;

  /* Clip table */
  if (!(Clip=(unsigned char *)malloc(1024)))
    Error("Clip[] malloc failed\n");

  Clip += 384;
  for (i=-384; i<640; i++)
    Clip[i] = (i<0) ? 0 : ((i>255) ? 255 : i);
}

/* mostly IMPLEMENTATION specific routines */
void VideoWrapper::InitializeSequence()
{
  int size, chroma_size, luma_size;
  static int Table_6_20[3] = {6,8,12};

  /* check the scalability mode of the enhancement layer */
  if (Two_Streams && (enhan.scalable_mode!=SC_SNR) && (base.scalable_mode!=SC_DP))
    Error("unsupported scalability mode\n");

  /* force MPEG-1 parameters for proper decoder behavior */
  /* see ISO/IEC 13818-2 section D.9.14 */
  if (!base.MPEG2_Flag)
  {
    progressive_sequence = 1;
    progressive_frame    = 1;
    picture_structure    = FRAME_PICTURE;
    frame_pred_frame_dct = 1;
    chroma_format        = CHROMA420;
    matrix_coefficients  = 5;
  }

  /* round to the nearest multiple of coded macroblocks */
  /* ISO/IEC 13818-2 section 6.3.3 sequence_header() */
  mb_width  = (horizontal_size+15)/16;
  mb_height = (base.MPEG2_Flag && !progressive_sequence) ?
    2*((vertical_size+31)/32) : (vertical_size+15)/16;

  Coded_Picture_Width  = 16*mb_width;
  Coded_Picture_Height = 16*mb_height;

  // Allocate space for the output bitmap
  //DibArray = malloc( Coded_Picture_Width * Coded_Picture_Height * 3 );
  //DibArray = TempArray;

  /* ISO/IEC 13818-2 sections 6.1.1.8, 6.1.1.9, and 6.1.1.10 */
  Chroma_Width  = (chroma_format==CHROMA444) ? Coded_Picture_Width  : Coded_Picture_Width>>1;
  Chroma_Height = (chroma_format!=CHROMA420) ? Coded_Picture_Height : Coded_Picture_Height>>1;

  /* derived based on Table 6-20 in ISO/IEC 13818-2 section 6.3.17 */
  block_count = Table_6_20[chroma_format-1];

  /* Allocate buffers */
  chroma_size = Chroma_Width*Chroma_Height;
  luma_size   = Coded_Picture_Width*Coded_Picture_Height;
  size        = luma_size + 2*(chroma_size);

  /* Set the format in our frames. This will allocate the space in them. */
  for(int i=0; i<3; i++)
    m_frRef[i].Set( Coded_Picture_Width, Coded_Picture_Height, FRAME_YV12 );

  backward_reference_frame[0] = (unsigned char *)m_frRef[0].GetBuffer();
  forward_reference_frame[0]  = (unsigned char *)m_frRef[1].GetBuffer();
  auxframe[0]                 = (unsigned char *)m_frRef[2].GetBuffer();

  /* Setup pointers */
  backward_reference_frame[1] = backward_reference_frame[0] + luma_size + chroma_size;
  backward_reference_frame[2] = backward_reference_frame[0] + luma_size;
  forward_reference_frame[1]  = forward_reference_frame[0]  + luma_size + chroma_size;
  forward_reference_frame[2]  = forward_reference_frame[0]  + luma_size;
  auxframe[1] = auxframe[0] + luma_size + chroma_size;
  auxframe[2] = auxframe[0] + luma_size;
}

void Error(char *text)
{
  fprintf(stderr, text);
  // exit(1);
}

/* Trace_Flag output */
void Print_Bits(int code, int bits, int len)
{
  int i;
  for (i=0; i<len; i++)
    printf("%d", (code>>(bits-1-i))&1);
}

void VideoWrapper::DeinitializeSequence()
{
  /* clear flags */
  base.MPEG2_Flag = 0;
}

bool VideoWrapper::SetStreamPos(ui64 pos)
{
  // Lock the decoder
  CAutoLock lock(&m_csLockSection);

  PESinfo dummyPes;
  bool bSuccess = false;

  i64 nFirstJump = (i64)pos - nJumpBackwards;
  ui64 nCurrentPos;

  if( nFirstJump<=0 )
    nFirstJump = 0;

  // Align the stream to the pack header near the first jump
  CDemux::SetStreamPos( CDemux::GetSyncPoint(pos) );

  m_bReadSuccess = true;

  // Read video PESes until we pass the requested position
  while( m_bReadSuccess && (pos >= CDemux::GetStreamPos()) )
  {
    while( (m_bReadSuccess = ReadPES((unsigned char **)&m_pTempVideoBuffer, &dummyPes)) &&
           (dummyPes.streamID == streamID) )
    {
      // We got a video PES. Get the current position.
      nCurrentPos = CDemux::GetStreamPos();

      // Is the position requested inside this PES?
      if( pos < nCurrentPos && pos >= (nCurrentPos - dummyPes.payloadSize) )
      {
        // Yep, it is.
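        // Leave the read pointer offset inside the payload so the next
        // GetFordByte() returns exactly the byte at 'pos'.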
        m_nTempVideoBufferSize = dummyPes.payloadSize;
        m_nTempVideoBufferPtr  = (ui32)(dummyPes.payloadSize - (nCurrentPos - pos));
        m_nPesPosState = PesEnd;
        bSuccess = true;
        break;
      }
      else if( pos < nCurrentPos )
      {
        // We missed the position requested without
        // finding an appropriate PES position
        DBG_STR((str, "VideoWrapper::SetStreamPos - Position not found\n"))
        m_nTempVideoBufferSize = dummyPes.payloadSize;
        m_nTempVideoBufferPtr  = 0;
        bSuccess = false;
        break;
      }
    }
  }

  // Because this was a seek operation and we didn't succeed, remove
  // the data present in the buffer because it's no longer valid.
  if( !bSuccess )
    FlushBuffer();

  return bSuccess;
}

ui64 VideoWrapper::GetStreamPos()
{
  // Lock the decoder object in this function
  CAutoLock lock(&m_csLockSection);

  // Get the real current position. If this PES was read backwards,
  // the end position was already retrieved.
  ui64 nCurrentPos = m_nPesPosState==PesBegin ?
    m_nEndPesPosition : CDemux::GetStreamPos();

  return nCurrentPos - (m_nTempVideoBufferSize - m_nTempVideoBufferPtr);
}

int VideoWrapper::GetError()
{
  int temp;
  temp  = error;
  error = 0;
  return temp;
}

void VideoWrapper::updateVideoStats()
{
  if(isMPEG2)
  {
    DAR = MPEG2aspect_ratio_Table[aspect_ratio_information];
  }
  else
  {
    DAR = (MPEG1aspect_ratio_Table[aspect_ratio_information]*(double)pictureHeight) /
          ((double)pictureWidth);
  }

  frameRate = frameRateTable[frame_rate_code];
  detectedFrameDelay = 1/frameRateTable[detectedFrameRateCode];
  frameDelay = 1/frameRate;
}

// Stub for feeding data to the MSSG video decoder
bool VideoWrapper::ReadVideoData( ui8** ppBuffer, ui32* pBufferSize )
{
  if( m_nPesPosState == PesBegin )
    CDemux::SetStreamPos( m_nEndPesPosition );

  do
  {
    m_bReadSuccess = ReadLPES((unsigned char **)&m_pTempVideoBuffer, &myPES);
    if(!m_bReadSuccess)
      break;

    if( myPES.streamID == subpic_streamID &&
        myPES.subStreamID == subpic_substreamID )
    {
      /* this is a subpic; parse it, and discard it so the video decoder
         doesn't see it */
      subpic_decode((unsigned char *)m_pTempVideoBuffer,
        myVideo->myPES.payloadSize,
        (int)((double)myVideo->myPES.PTS/27000.0) );
      continue;
    }
  } while( myPES.streamID!=streamID );

  // We are feeding the video decoder;
  // set the offset to what we've read.
  m_nTempVideoBufferSize = myPES.payloadSize;
  m_nTempVideoBufferPtr  = myPES.payloadSize;
  m_nPesPosState = PesEnd;

  // Update the buffers
  *ppBuffer    = m_pTempVideoBuffer;
  *pBufferSize = myPES.payloadSize;

  return m_bReadSuccess;
}

// This megamatic function will try to detect the frame rate of a
// stream just by looking at the flags of the incoming pictures.
// This is necessary to detect 24 fps progressive sequences that have
// frame_rate_code set to 29.97 and use the repeat_first_field flag.
bool VideoWrapper::Is24Progressive()
{
  bool bSuccess = false;
  ui64 nStreamSize = GetStreamSize();

  // Jump to the middle of the stream
  CDemux::SetStreamPos(nStreamSize>>1);
  Initialize_Buffer();

  int nFramesToTry = 10;
  while( Get_Hdr() && nFramesToTry-- )
  {
    if(picture_structure!=FRAME_PICTURE)
      break;

    // If it's not 29.97, give up
    if( frame_rate_code!=4 )
      break;

    if(repeat_first_field)
    {
      bSuccess = true;
      break;
    }
  }

  return bSuccess;
}

bool VideoWrapper::BuildFrameDB()
{
  // Set the stream to the beginning.
  SetStreamPos(0);

  bool bSuccess = true;
  ui8  byte;
  ui32 nTemporalReference;

  while(bSuccess)
  {
    // Find the next start code prefix
    ui32 nStartCode = 0xFFFFFF00;
    while( (nStartCode&0x00FFFFFF)!=1 && bSuccess )
    {
      bSuccess = GetFordByte(&byte);
      nStartCode = (nStartCode<<8) | byte;
    }

    GetFordByte(&byte);
    switch(byte)
    {
    case 0x00: // Picture start code
      // Get the start position
      m_sTempFrameInfo.nStartPos = GetStreamPos() - 4;

      GetFordByte(&byte);
      nTemporalReference = byte << 2;
      GetFordByte(&byte);
      nTemporalReference |= (byte&0xC0)>>6;

      // Get the picture coding type
      m_sTempFrameInfo.picture_coding_type = (byte&0x3F)>>3;

      // If this is MPEG-1, we're finished
      if(!isMPEG2)
        m_vFrames.push_back(m_sTempFrameInfo);
      break;

    case 0xB5: // Extension start code
      GetFordByte(&byte);
      switch(byte>>4)
      {
      case 0x08: // picture coding extension
        GetFordByte(&byte);
        GetFordByte(&byte);
        // the two last bits are picture_structure
        m_sTempFrameInfo.picture_structure = byte&0x03;
        GetFordByte(&byte);
        // top_field_first: 8th bit; repeat_first_field: 2nd bit
        m_sTempFrameInfo.top_field_first    = byte & 0x80;
        m_sTempFrameInfo.repeat_first_field = byte & 0x02;
        GetFordByte(&byte);
        // progressive_frame: 8th bit
        m_sTempFrameInfo.progressive_frame = byte & 0x80;
        break;
      }
      m_vFrames.push_back(m_sTempFrameInfo);
      break;
    }
  }

  int size = m_vFrames.size();
  return true;
}

TMPGVideoInfo * VideoWrapper::GetVideoInfo()
{
  video_info.width  = Coded_Picture_Width;
  video_info.height = Coded_Picture_Height;
  video_info.aspect_ratio_information = aspect_ratio_information;
  video_info.bit_rate_value = bit_rate_value;
  video_info.detected_frame_rate_code = detectedFrameRateCode;
  video_info.frame_rate_code = frame_rate_code;
  video_info.isMPEG2 = isMPEG2;
  video_info.progressive_frame = progressive_frame;
  video_info.progressive_sequence = progressive_sequence;
  return &video_info;
}