// Source: samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvTransform.cpp
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF1// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO2// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A3// PARTICULAR PURPOSE.4//5// Copyright (c) Microsoft Corporation. All rights reserved.67#include "OcvTransform.h"8#include "bufferlock.h"910#include <opencv2\core.hpp>11#include <opencv2\imgproc.hpp>12#include <opencv2\features2d.hpp>13141516using namespace Microsoft::WRL;1718/*1920This sample implements a video effect as a Media Foundation transform (MFT).2122NOTES ON THE MFT IMPLEMENTATION23241. The MFT has fixed streams: One input stream and one output stream.25262. The MFT supports NV12 format only.27283. If the MFT is holding an input sample, SetInputType and SetOutputType both fail.29304. The input and output types must be identical.31325. If both types are set, no type can be set until the current type is cleared.33346. Preferred input types:3536(a) If the output type is set, that's the preferred type.37(b) Otherwise, the preferred types are partial types, constructed from the38list of supported subtypes.39407. Preferred output types: As above.41428. Streaming:4344The private BeingStreaming() method is called in response to the45MFT_MESSAGE_NOTIFY_BEGIN_STREAMING message.4647If the client does not send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, the MFT calls48BeginStreaming inside the first call to ProcessInput or ProcessOutput.4950This is a good approach for allocating resources that your MFT requires for51streaming.52539. The configuration attributes are applied in the BeginStreaming method. 
If the54client changes the attributes during streaming, the change is ignored until55streaming is stopped (either by changing the media types or by sending the56MFT_MESSAGE_NOTIFY_END_STREAMING message) and then restarted.5758*/596061// Static array of media types (preferred and accepted).62const GUID g_MediaSubtypes[] =63{64MFVideoFormat_NV1265};6667HRESULT GetDefaultStride(IMFMediaType *pType, LONG *plStride);6869template <typename T>70inline T clamp(const T& val, const T& minVal, const T& maxVal)71{72return (val < minVal ? minVal : (val > maxVal ? maxVal : val));73}7475OcvImageManipulations::OcvImageManipulations() :76m_pSample(NULL), m_pInputType(NULL), m_pOutputType(NULL),77m_imageWidthInPixels(0), m_imageHeightInPixels(0), m_cbImageSize(0),78m_TransformType(Preview), m_bStreamingInitialized(false),79m_pAttributes(NULL)80{81InitializeCriticalSectionEx(&m_critSec, 3000, 0);82}8384OcvImageManipulations::~OcvImageManipulations()85{86SafeRelease(&m_pInputType);87SafeRelease(&m_pOutputType);88SafeRelease(&m_pSample);89SafeRelease(&m_pAttributes);90DeleteCriticalSection(&m_critSec);91}9293// Initialize the instance.94STDMETHODIMP OcvImageManipulations::RuntimeClassInitialize()95{96// Create the attribute store.97return MFCreateAttributes(&m_pAttributes, 3);98}99100// IMediaExtension methods101102//-------------------------------------------------------------------103// SetProperties104// Sets the configuration of the effect105//-------------------------------------------------------------------106HRESULT OcvImageManipulations::SetProperties(ABI::Windows::Foundation::Collections::IPropertySet *pConfiguration)107{108HRESULT hr = S_OK;109110if (!pConfiguration)111return hr;112113HSTRING key;114WindowsCreateString(L"{698649BE-8EAE-4551-A4CB-3EC98FBD3D86}", 38, &key);115Microsoft::WRL::ComPtr<ABI::Windows::Foundation::Collections::IMap<HSTRING, IInspectable *>> spSetting;116pConfiguration->QueryInterface(IID_PPV_ARGS(&spSetting));117boolean 
found;118spSetting->HasKey(key, &found);119120if (found)121{122Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IPropertyValue> spPropVal;123Microsoft::WRL::ComPtr<IInspectable> spInsp;124125spSetting->Lookup(key, spInsp.ReleaseAndGetAddressOf());126127hr = spInsp.As(&spPropVal);128if (hr != S_OK)129{130return hr;131}132133INT32 effect;134hr = spPropVal->GetInt32(&effect);135if (hr != S_OK)136{137return hr;138}139140if ((effect >= 0) && (effect < InvalidEffect))141{142m_TransformType = (ProcessingType)effect;143}144}145146return hr;147}148149// IMFTransform methods. Refer to the Media Foundation SDK documentation for details.150151//-------------------------------------------------------------------152// GetStreamLimits153// Returns the minimum and maximum number of streams.154//-------------------------------------------------------------------155156HRESULT OcvImageManipulations::GetStreamLimits(157DWORD *pdwInputMinimum,158DWORD *pdwInputMaximum,159DWORD *pdwOutputMinimum,160DWORD *pdwOutputMaximum161)162{163if ((pdwInputMinimum == NULL) ||164(pdwInputMaximum == NULL) ||165(pdwOutputMinimum == NULL) ||166(pdwOutputMaximum == NULL))167{168return E_POINTER;169}170171// This MFT has a fixed number of streams.172*pdwInputMinimum = 1;173*pdwInputMaximum = 1;174*pdwOutputMinimum = 1;175*pdwOutputMaximum = 1;176return S_OK;177}178179180//-------------------------------------------------------------------181// GetStreamCount182// Returns the actual number of streams.183//-------------------------------------------------------------------184185HRESULT OcvImageManipulations::GetStreamCount(186DWORD *pcInputStreams,187DWORD *pcOutputStreams188)189{190if ((pcInputStreams == NULL) || (pcOutputStreams == NULL))191192{193return E_POINTER;194}195196// This MFT has a fixed number of streams.197*pcInputStreams = 1;198*pcOutputStreams = 1;199return S_OK;200}201202203204//-------------------------------------------------------------------205// GetStreamIDs206// Returns stream IDs 
for the input and output streams.207//-------------------------------------------------------------------208209HRESULT OcvImageManipulations::GetStreamIDs(210DWORD dwInputIDArraySize,211DWORD *pdwInputIDs,212DWORD dwOutputIDArraySize,213DWORD *pdwOutputIDs214)215{216// It is not required to implement this method if the MFT has a fixed number of217// streams AND the stream IDs are numbered sequentially from zero (that is, the218// stream IDs match the stream indexes).219220// In that case, it is OK to return E_NOTIMPL.221return E_NOTIMPL;222}223224225//-------------------------------------------------------------------226// GetInputStreamInfo227// Returns information about an input stream.228//-------------------------------------------------------------------229230HRESULT OcvImageManipulations::GetInputStreamInfo(231DWORD dwInputStreamID,232MFT_INPUT_STREAM_INFO * pStreamInfo233)234{235if (pStreamInfo == NULL)236{237return E_POINTER;238}239240EnterCriticalSection(&m_critSec);241242if (!IsValidInputStream(dwInputStreamID))243{244LeaveCriticalSection(&m_critSec);245return MF_E_INVALIDSTREAMNUMBER;246}247248// NOTE: This method should succeed even when there is no media type on the249// stream. If there is no media type, we only need to fill in the dwFlags250// member of MFT_INPUT_STREAM_INFO. 
The other members depend on having a251// a valid media type.252253pStreamInfo->hnsMaxLatency = 0;254pStreamInfo->dwFlags = MFT_INPUT_STREAM_WHOLE_SAMPLES | MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER;255256if (m_pInputType == NULL)257{258pStreamInfo->cbSize = 0;259}260else261{262pStreamInfo->cbSize = m_cbImageSize;263}264265pStreamInfo->cbMaxLookahead = 0;266pStreamInfo->cbAlignment = 0;267268LeaveCriticalSection(&m_critSec);269return S_OK;270}271272//-------------------------------------------------------------------273// GetOutputStreamInfo274// Returns information about an output stream.275//-------------------------------------------------------------------276277HRESULT OcvImageManipulations::GetOutputStreamInfo(278DWORD dwOutputStreamID,279MFT_OUTPUT_STREAM_INFO * pStreamInfo280)281{282if (pStreamInfo == NULL)283{284return E_POINTER;285}286287EnterCriticalSection(&m_critSec);288289if (!IsValidOutputStream(dwOutputStreamID))290{291LeaveCriticalSection(&m_critSec);292return MF_E_INVALIDSTREAMNUMBER;293}294295// NOTE: This method should succeed even when there is no media type on the296// stream. If there is no media type, we only need to fill in the dwFlags297// member of MFT_OUTPUT_STREAM_INFO. 
The other members depend on having a298// a valid media type.299300pStreamInfo->dwFlags =301MFT_OUTPUT_STREAM_WHOLE_SAMPLES |302MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |303MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE ;304305if (m_pOutputType == NULL)306{307pStreamInfo->cbSize = 0;308}309else310{311pStreamInfo->cbSize = m_cbImageSize;312}313314pStreamInfo->cbAlignment = 0;315316LeaveCriticalSection(&m_critSec);317return S_OK;318}319320321//-------------------------------------------------------------------322// GetAttributes323// Returns the attributes for the MFT.324//-------------------------------------------------------------------325326HRESULT OcvImageManipulations::GetAttributes(IMFAttributes** ppAttributes)327{328if (ppAttributes == NULL)329{330return E_POINTER;331}332333EnterCriticalSection(&m_critSec);334335*ppAttributes = m_pAttributes;336(*ppAttributes)->AddRef();337338LeaveCriticalSection(&m_critSec);339return S_OK;340}341342343//-------------------------------------------------------------------344// GetInputStreamAttributes345// Returns stream-level attributes for an input stream.346//-------------------------------------------------------------------347348HRESULT OcvImageManipulations::GetInputStreamAttributes(349DWORD dwInputStreamID,350IMFAttributes **ppAttributes351)352{353// This MFT does not support any stream-level attributes, so the method is not implemented.354return E_NOTIMPL;355}356357358//-------------------------------------------------------------------359// GetOutputStreamAttributes360// Returns stream-level attributes for an output stream.361//-------------------------------------------------------------------362363HRESULT OcvImageManipulations::GetOutputStreamAttributes(364DWORD dwOutputStreamID,365IMFAttributes **ppAttributes366)367{368// This MFT does not support any stream-level attributes, so the method is not implemented.369return E_NOTIMPL;370}371372373//-------------------------------------------------------------------374// 
DeleteInputStream375//-------------------------------------------------------------------376377HRESULT OcvImageManipulations::DeleteInputStream(DWORD dwStreamID)378{379// This MFT has a fixed number of input streams, so the method is not supported.380return E_NOTIMPL;381}382383384//-------------------------------------------------------------------385// AddInputStreams386//-------------------------------------------------------------------387388HRESULT OcvImageManipulations::AddInputStreams(389DWORD cStreams,390DWORD *adwStreamIDs391)392{393// This MFT has a fixed number of output streams, so the method is not supported.394return E_NOTIMPL;395}396397398//-------------------------------------------------------------------399// GetInputAvailableType400// Returns a preferred input type.401//-------------------------------------------------------------------402403HRESULT OcvImageManipulations::GetInputAvailableType(404DWORD dwInputStreamID,405DWORD dwTypeIndex, // 0-based406IMFMediaType **ppType407)408{409if (ppType == NULL)410{411return E_INVALIDARG;412}413414EnterCriticalSection(&m_critSec);415416if (!IsValidInputStream(dwInputStreamID))417{418LeaveCriticalSection(&m_critSec);419return MF_E_INVALIDSTREAMNUMBER;420}421422HRESULT hr = S_OK;423424// If the output type is set, return that type as our preferred input type.425if (m_pOutputType == NULL)426{427// The output type is not set. 
Create a partial media type.428hr = OnGetPartialType(dwTypeIndex, ppType);429}430else if (dwTypeIndex > 0)431{432hr = MF_E_NO_MORE_TYPES;433}434else435{436*ppType = m_pOutputType;437(*ppType)->AddRef();438}439440LeaveCriticalSection(&m_critSec);441return hr;442}443444445446//-------------------------------------------------------------------447// GetOutputAvailableType448// Returns a preferred output type.449//-------------------------------------------------------------------450451HRESULT OcvImageManipulations::GetOutputAvailableType(452DWORD dwOutputStreamID,453DWORD dwTypeIndex, // 0-based454IMFMediaType **ppType455)456{457if (ppType == NULL)458{459return E_INVALIDARG;460}461462EnterCriticalSection(&m_critSec);463464if (!IsValidOutputStream(dwOutputStreamID))465{466LeaveCriticalSection(&m_critSec);467return MF_E_INVALIDSTREAMNUMBER;468}469470HRESULT hr = S_OK;471472if (m_pInputType == NULL)473{474// The input type is not set. Create a partial media type.475hr = OnGetPartialType(dwTypeIndex, ppType);476}477else if (dwTypeIndex > 0)478{479hr = MF_E_NO_MORE_TYPES;480}481else482{483*ppType = m_pInputType;484(*ppType)->AddRef();485}486487LeaveCriticalSection(&m_critSec);488return hr;489}490491492//-------------------------------------------------------------------493// SetInputType494//-------------------------------------------------------------------495496HRESULT OcvImageManipulations::SetInputType(497DWORD dwInputStreamID,498IMFMediaType *pType, // Can be NULL to clear the input type.499DWORD dwFlags500)501{502// Validate flags.503if (dwFlags & ~MFT_SET_TYPE_TEST_ONLY)504{505return E_INVALIDARG;506}507508EnterCriticalSection(&m_critSec);509510if (!IsValidInputStream(dwInputStreamID))511{512LeaveCriticalSection(&m_critSec);513return MF_E_INVALIDSTREAMNUMBER;514}515516HRESULT hr = S_OK;517518// Does the caller want us to set the type, or just test it?519BOOL bReallySet = ((dwFlags & MFT_SET_TYPE_TEST_ONLY) == 0);520521// If we have an input sample, the client cannot 
change the type now.522if (HasPendingOutput())523{524hr = MF_E_TRANSFORM_CANNOT_CHANGE_MEDIATYPE_WHILE_PROCESSING;525goto done;526}527528// Validate the type, if non-NULL.529if (pType)530{531hr = OnCheckInputType(pType);532if (FAILED(hr))533{534goto done;535}536}537538// The type is OK. Set the type, unless the caller was just testing.539if (bReallySet)540{541OnSetInputType(pType);542543// When the type changes, end streaming.544hr = EndStreaming();545}546547done:548LeaveCriticalSection(&m_critSec);549return hr;550}551552553554//-------------------------------------------------------------------555// SetOutputType556//-------------------------------------------------------------------557558HRESULT OcvImageManipulations::SetOutputType(559DWORD dwOutputStreamID,560IMFMediaType *pType, // Can be NULL to clear the output type.561DWORD dwFlags562)563{564// Validate flags.565if (dwFlags & ~MFT_SET_TYPE_TEST_ONLY)566{567return E_INVALIDARG;568}569570EnterCriticalSection(&m_critSec);571572if (!IsValidOutputStream(dwOutputStreamID))573{574LeaveCriticalSection(&m_critSec);575return MF_E_INVALIDSTREAMNUMBER;576}577578HRESULT hr = S_OK;579580// Does the caller want us to set the type, or just test it?581BOOL bReallySet = ((dwFlags & MFT_SET_TYPE_TEST_ONLY) == 0);582583// If we have an input sample, the client cannot change the type now.584if (HasPendingOutput())585{586hr = MF_E_TRANSFORM_CANNOT_CHANGE_MEDIATYPE_WHILE_PROCESSING;587goto done;588}589590// Validate the type, if non-NULL.591if (pType)592{593hr = OnCheckOutputType(pType);594if (FAILED(hr))595{596goto done;597}598}599600// The type is OK. 
Set the type, unless the caller was just testing.601if (bReallySet)602{603OnSetOutputType(pType);604605// When the type changes, end streaming.606hr = EndStreaming();607}608609done:610LeaveCriticalSection(&m_critSec);611return hr;612}613614615//-------------------------------------------------------------------616// GetInputCurrentType617// Returns the current input type.618//-------------------------------------------------------------------619620HRESULT OcvImageManipulations::GetInputCurrentType(621DWORD dwInputStreamID,622IMFMediaType **ppType623)624{625if (ppType == NULL)626{627return E_POINTER;628}629630HRESULT hr = S_OK;631632EnterCriticalSection(&m_critSec);633634if (!IsValidInputStream(dwInputStreamID))635{636hr = MF_E_INVALIDSTREAMNUMBER;637}638else if (!m_pInputType)639{640hr = MF_E_TRANSFORM_TYPE_NOT_SET;641}642else643{644*ppType = m_pInputType;645(*ppType)->AddRef();646}647LeaveCriticalSection(&m_critSec);648return hr;649}650651652//-------------------------------------------------------------------653// GetOutputCurrentType654// Returns the current output type.655//-------------------------------------------------------------------656657HRESULT OcvImageManipulations::GetOutputCurrentType(658DWORD dwOutputStreamID,659IMFMediaType **ppType660)661{662if (ppType == NULL)663{664return E_POINTER;665}666667HRESULT hr = S_OK;668669EnterCriticalSection(&m_critSec);670671if (!IsValidOutputStream(dwOutputStreamID))672{673hr = MF_E_INVALIDSTREAMNUMBER;674}675else if (!m_pOutputType)676{677hr = MF_E_TRANSFORM_TYPE_NOT_SET;678}679else680{681*ppType = m_pOutputType;682(*ppType)->AddRef();683}684685LeaveCriticalSection(&m_critSec);686return hr;687}688689690//-------------------------------------------------------------------691// GetInputStatus692// Query if the MFT is accepting more input.693//-------------------------------------------------------------------694695HRESULT OcvImageManipulations::GetInputStatus(696DWORD dwInputStreamID,697DWORD *pdwFlags698)699{700if 
(pdwFlags == NULL)701{702return E_POINTER;703}704705EnterCriticalSection(&m_critSec);706707if (!IsValidInputStream(dwInputStreamID))708{709LeaveCriticalSection(&m_critSec);710return MF_E_INVALIDSTREAMNUMBER;711}712713// If an input sample is already queued, do not accept another sample until the714// client calls ProcessOutput or Flush.715716// NOTE: It is possible for an MFT to accept more than one input sample. For717// example, this might be required in a video decoder if the frames do not718// arrive in temporal order. In the case, the decoder must hold a queue of719// samples. For the video effect, each sample is transformed independently, so720// there is no reason to queue multiple input samples.721722if (m_pSample == NULL)723{724*pdwFlags = MFT_INPUT_STATUS_ACCEPT_DATA;725}726else727{728*pdwFlags = 0;729}730731LeaveCriticalSection(&m_critSec);732return S_OK;733}734735736737//-------------------------------------------------------------------738// GetOutputStatus739// Query if the MFT can produce output.740//-------------------------------------------------------------------741742HRESULT OcvImageManipulations::GetOutputStatus(DWORD *pdwFlags)743{744if (pdwFlags == NULL)745{746return E_POINTER;747}748749EnterCriticalSection(&m_critSec);750751// The MFT can produce an output sample if (and only if) there an input sample.752if (m_pSample != NULL)753{754*pdwFlags = MFT_OUTPUT_STATUS_SAMPLE_READY;755}756else757{758*pdwFlags = 0;759}760761LeaveCriticalSection(&m_critSec);762return S_OK;763}764765766//-------------------------------------------------------------------767// SetOutputBounds768// Sets the range of time stamps that the MFT will output.769//-------------------------------------------------------------------770771HRESULT OcvImageManipulations::SetOutputBounds(772LONGLONG hnsLowerBound,773LONGLONG hnsUpperBound774)775{776// Implementation of this method is optional.777return 
E_NOTIMPL;778}779780781//-------------------------------------------------------------------782// ProcessEvent783// Sends an event to an input stream.784//-------------------------------------------------------------------785786HRESULT OcvImageManipulations::ProcessEvent(787DWORD dwInputStreamID,788IMFMediaEvent *pEvent789)790{791// This MFT does not handle any stream events, so the method can792// return E_NOTIMPL. This tells the pipeline that it can stop793// sending any more events to this MFT.794return E_NOTIMPL;795}796797798//-------------------------------------------------------------------799// ProcessMessage800//-------------------------------------------------------------------801802HRESULT OcvImageManipulations::ProcessMessage(803MFT_MESSAGE_TYPE eMessage,804ULONG_PTR ulParam805)806{807EnterCriticalSection(&m_critSec);808809HRESULT hr = S_OK;810811switch (eMessage)812{813case MFT_MESSAGE_COMMAND_FLUSH:814// Flush the MFT.815hr = OnFlush();816break;817818case MFT_MESSAGE_COMMAND_DRAIN:819// Drain: Tells the MFT to reject further input until all pending samples are820// processed. That is our default behavior already, so there is nothing to do.821//822// For a decoder that accepts a queue of samples, the MFT might need to drain823// the queue in response to this command.824break;825826case MFT_MESSAGE_SET_D3D_MANAGER:827// Sets a pointer to the IDirect3DDeviceManager9 interface.828829// The pipeline should never send this message unless the MFT sets the MF_SA_D3D_AWARE830// attribute set to TRUE. Because this MFT does not set MF_SA_D3D_AWARE, it is an error831// to send the MFT_MESSAGE_SET_D3D_MANAGER message to the MFT. 
Return an error code in832// this case.833834// NOTE: If this MFT were D3D-enabled, it would cache the IDirect3DDeviceManager9835// pointer for use during streaming.836837hr = E_NOTIMPL;838break;839840case MFT_MESSAGE_NOTIFY_BEGIN_STREAMING:841hr = BeginStreaming();842break;843844case MFT_MESSAGE_NOTIFY_END_STREAMING:845hr = EndStreaming();846break;847848// The next two messages do not require any action from this MFT.849850case MFT_MESSAGE_NOTIFY_END_OF_STREAM:851break;852853case MFT_MESSAGE_NOTIFY_START_OF_STREAM:854break;855}856857LeaveCriticalSection(&m_critSec);858return hr;859}860861862//-------------------------------------------------------------------863// ProcessInput864// Process an input sample.865//-------------------------------------------------------------------866867HRESULT OcvImageManipulations::ProcessInput(868DWORD dwInputStreamID,869IMFSample *pSample,870DWORD dwFlags871)872{873// Check input parameters.874if (pSample == NULL)875{876return E_POINTER;877}878879if (dwFlags != 0)880{881return E_INVALIDARG; // dwFlags is reserved and must be zero.882}883884HRESULT hr = S_OK;885886EnterCriticalSection(&m_critSec);887888// Validate the input stream number.889if (!IsValidInputStream(dwInputStreamID))890{891hr = MF_E_INVALIDSTREAMNUMBER;892goto done;893}894895// Check for valid media types.896// The client must set input and output types before calling ProcessInput.897if (!m_pInputType || !m_pOutputType)898{899hr = MF_E_NOTACCEPTING;900goto done;901}902903// Check if an input sample is already queued.904if (m_pSample != NULL)905{906hr = MF_E_NOTACCEPTING; // We already have an input sample.907goto done;908}909910// Initialize streaming.911hr = BeginStreaming();912if (FAILED(hr))913{914goto done;915}916917// Cache the sample. 
We do the actual work in ProcessOutput.918m_pSample = pSample;919pSample->AddRef(); // Hold a reference count on the sample.920921done:922LeaveCriticalSection(&m_critSec);923return hr;924}925926927//-------------------------------------------------------------------928// ProcessOutput929// Process an output sample.930//-------------------------------------------------------------------931932HRESULT OcvImageManipulations::ProcessOutput(933DWORD dwFlags,934DWORD cOutputBufferCount,935MFT_OUTPUT_DATA_BUFFER *pOutputSamples, // one per stream936DWORD *pdwStatus937)938{939// Check input parameters...940941// This MFT does not accept any flags for the dwFlags parameter.942943// The only defined flag is MFT_PROCESS_OUTPUT_DISCARD_WHEN_NO_BUFFER. This flag944// applies only when the MFT marks an output stream as lazy or optional. But this945// MFT has no lazy or optional streams, so the flag is not valid.946947if (dwFlags != 0)948{949return E_INVALIDARG;950}951952if (pOutputSamples == NULL || pdwStatus == NULL)953{954return E_POINTER;955}956957// There must be exactly one output buffer.958if (cOutputBufferCount != 1)959{960return E_INVALIDARG;961}962963// It must contain a sample.964if (pOutputSamples[0].pSample == NULL)965{966return E_INVALIDARG;967}968969HRESULT hr = S_OK;970971IMFMediaBuffer *pInput = NULL;972IMFMediaBuffer *pOutput = NULL;973974EnterCriticalSection(&m_critSec);975976// There must be an input sample available for processing.977if (m_pSample == NULL)978{979hr = MF_E_TRANSFORM_NEED_MORE_INPUT;980goto done;981}982983// Initialize streaming.984985hr = BeginStreaming();986if (FAILED(hr))987{988goto done;989}990991// Get the input buffer.992hr = m_pSample->ConvertToContiguousBuffer(&pInput);993if (FAILED(hr))994{995goto done;996}997998// Get the output buffer.999hr = pOutputSamples[0].pSample->ConvertToContiguousBuffer(&pOutput);1000if (FAILED(hr))1001{1002goto done;1003}10041005hr = OnProcessOutput(pInput, pOutput);1006if (FAILED(hr))1007{1008goto 
done;1009}10101011// Set status flags.1012pOutputSamples[0].dwStatus = 0;1013*pdwStatus = 0;101410151016// Copy the duration and time stamp from the input sample, if present.10171018LONGLONG hnsDuration = 0;1019LONGLONG hnsTime = 0;10201021if (SUCCEEDED(m_pSample->GetSampleDuration(&hnsDuration)))1022{1023hr = pOutputSamples[0].pSample->SetSampleDuration(hnsDuration);1024if (FAILED(hr))1025{1026goto done;1027}1028}10291030if (SUCCEEDED(m_pSample->GetSampleTime(&hnsTime)))1031{1032hr = pOutputSamples[0].pSample->SetSampleTime(hnsTime);1033}10341035done:1036SafeRelease(&m_pSample); // Release our input sample.1037SafeRelease(&pInput);1038SafeRelease(&pOutput);1039LeaveCriticalSection(&m_critSec);1040return hr;1041}10421043// PRIVATE METHODS10441045// All methods that follow are private to this MFT and are not part of the IMFTransform interface.10461047// Create a partial media type from our list.1048//1049// dwTypeIndex: Index into the list of peferred media types.1050// ppmt: Receives a pointer to the media type.10511052HRESULT OcvImageManipulations::OnGetPartialType(DWORD dwTypeIndex, IMFMediaType **ppmt)1053{1054if (dwTypeIndex >= ARRAYSIZE(g_MediaSubtypes))1055{1056return MF_E_NO_MORE_TYPES;1057}10581059IMFMediaType *pmt = NULL;10601061HRESULT hr = MFCreateMediaType(&pmt);1062if (FAILED(hr))1063{1064goto done;1065}10661067hr = pmt->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);1068if (FAILED(hr))1069{1070goto done;1071}10721073hr = pmt->SetGUID(MF_MT_SUBTYPE, g_MediaSubtypes[dwTypeIndex]);1074if (FAILED(hr))1075{1076goto done;1077}10781079*ppmt = pmt;1080(*ppmt)->AddRef();10811082done:1083SafeRelease(&pmt);1084return hr;1085}108610871088// Validate an input media type.10891090HRESULT OcvImageManipulations::OnCheckInputType(IMFMediaType *pmt)1091{1092assert(pmt != NULL);10931094HRESULT hr = S_OK;10951096// If the output type is set, see if they match.1097if (m_pOutputType != NULL)1098{1099DWORD flags = 0;1100hr = pmt->IsEqual(m_pOutputType, &flags);11011102// 
IsEqual can return S_FALSE. Treat this as failure.1103if (hr != S_OK)1104{1105hr = MF_E_INVALIDMEDIATYPE;1106}1107}1108else1109{1110// Output type is not set. Just check this type.1111hr = OnCheckMediaType(pmt);1112}1113return hr;1114}111511161117// Validate an output media type.11181119HRESULT OcvImageManipulations::OnCheckOutputType(IMFMediaType *pmt)1120{1121assert(pmt != NULL);11221123HRESULT hr = S_OK;11241125// If the input type is set, see if they match.1126if (m_pInputType != NULL)1127{1128DWORD flags = 0;1129hr = pmt->IsEqual(m_pInputType, &flags);11301131// IsEqual can return S_FALSE. Treat this as failure.1132if (hr != S_OK)1133{1134hr = MF_E_INVALIDMEDIATYPE;1135}11361137}1138else1139{1140// Input type is not set. Just check this type.1141hr = OnCheckMediaType(pmt);1142}1143return hr;1144}114511461147// Validate a media type (input or output)11481149HRESULT OcvImageManipulations::OnCheckMediaType(IMFMediaType *pmt)1150{1151BOOL bFoundMatchingSubtype = FALSE;11521153// Major type must be video.1154GUID major_type;1155HRESULT hr = pmt->GetGUID(MF_MT_MAJOR_TYPE, &major_type);1156if (FAILED(hr))1157{1158goto done;1159}11601161if (major_type != MFMediaType_Video)1162{1163hr = MF_E_INVALIDMEDIATYPE;1164goto done;1165}11661167// Subtype must be one of the subtypes in our global list.11681169// Get the subtype GUID.1170GUID subtype;1171hr = pmt->GetGUID(MF_MT_SUBTYPE, &subtype);1172if (FAILED(hr))1173{1174goto done;1175}11761177// Look for the subtype in our list of accepted types.1178for (DWORD i = 0; i < ARRAYSIZE(g_MediaSubtypes); i++)1179{1180if (subtype == g_MediaSubtypes[i])1181{1182bFoundMatchingSubtype = TRUE;1183break;1184}1185}11861187if (!bFoundMatchingSubtype)1188{1189hr = MF_E_INVALIDMEDIATYPE; // The MFT does not support this subtype.1190goto done;1191}11921193// Reject single-field media types.1194UINT32 interlace = MFGetAttributeUINT32(pmt, MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);1195if (interlace == MFVideoInterlace_FieldSingleUpper 
|| interlace == MFVideoInterlace_FieldSingleLower)1196{1197hr = MF_E_INVALIDMEDIATYPE;1198}11991200done:1201return hr;1202}120312041205// Set or clear the input media type.1206//1207// Prerequisite: The input type was already validated.12081209void OcvImageManipulations::OnSetInputType(IMFMediaType *pmt)1210{1211// if pmt is NULL, clear the type.1212// if pmt is non-NULL, set the type.12131214SafeRelease(&m_pInputType);1215m_pInputType = pmt;1216if (m_pInputType)1217{1218m_pInputType->AddRef();1219}12201221// Update the format information.1222UpdateFormatInfo();1223}122412251226// Set or clears the output media type.1227//1228// Prerequisite: The output type was already validated.12291230void OcvImageManipulations::OnSetOutputType(IMFMediaType *pmt)1231{1232// If pmt is NULL, clear the type. Otherwise, set the type.12331234SafeRelease(&m_pOutputType);1235m_pOutputType = pmt;1236if (m_pOutputType)1237{1238m_pOutputType->AddRef();1239}1240}124112421243// Initialize streaming parameters.1244//1245// This method is called if the client sends the MFT_MESSAGE_NOTIFY_BEGIN_STREAMING1246// message, or when the client processes a sample, whichever happens first.12471248HRESULT OcvImageManipulations::BeginStreaming()1249{1250HRESULT hr = S_OK;12511252if (!m_bStreamingInitialized)1253{1254m_bStreamingInitialized = true;1255hr = S_OK;1256}12571258return hr;1259}126012611262// End streaming.12631264// This method is called if the client sends an MFT_MESSAGE_NOTIFY_END_STREAMING1265// message, or when the media type changes. 
In general, it should be called whenever1266// the streaming parameters need to be reset.12671268HRESULT OcvImageManipulations::EndStreaming()1269{1270m_bStreamingInitialized = false;1271return S_OK;1272}1273127412751276// Generate output data.12771278HRESULT OcvImageManipulations::OnProcessOutput(IMFMediaBuffer *pIn, IMFMediaBuffer *pOut)1279{1280BYTE *pDest = NULL; // Destination buffer.1281LONG lDestStride = 0; // Destination stride.12821283BYTE *pSrc = NULL; // Source buffer.1284LONG lSrcStride = 0; // Source stride.12851286// Helper objects to lock the buffers.1287VideoBufferLock inputLock(pIn);1288VideoBufferLock outputLock(pOut);12891290// Stride if the buffer does not support IMF2DBuffer1291LONG lDefaultStride = 0;12921293HRESULT hr = GetDefaultStride(m_pInputType, &lDefaultStride);1294if (FAILED(hr))1295{1296return hr;1297}12981299// Lock the input buffer.1300hr = inputLock.LockBuffer(lDefaultStride, m_imageHeightInPixels, &pSrc, &lSrcStride);1301if (FAILED(hr))1302{1303return hr;1304}13051306// Lock the output buffer.1307hr = outputLock.LockBuffer(lDefaultStride, m_imageHeightInPixels, &pDest, &lDestStride);1308if (FAILED(hr))1309{1310return hr;1311}13121313cv::Mat InputFrame(m_imageHeightInPixels + m_imageHeightInPixels/2, m_imageWidthInPixels, CV_8UC1, pSrc, lSrcStride);1314cv::Mat InputGreyScale(InputFrame, cv::Range(0, m_imageHeightInPixels), cv::Range(0, m_imageWidthInPixels));1315cv::Mat OutputFrame(m_imageHeightInPixels + m_imageHeightInPixels/2, m_imageWidthInPixels, CV_8UC1, pDest, lDestStride);13161317switch (m_TransformType)1318{1319case Preview:1320{1321InputFrame.copyTo(OutputFrame);1322} break;1323case GrayScale:1324{1325OutputFrame.setTo(cv::Scalar(128));1326cv::Mat OutputGreyScale(OutputFrame, cv::Range(0, m_imageHeightInPixels), cv::Range(0, m_imageWidthInPixels));1327InputGreyScale.copyTo(OutputGreyScale);1328} break;1329case Canny:1330{1331OutputFrame.setTo(cv::Scalar(128));1332cv::Mat OutputGreyScale(OutputFrame, cv::Range(0, 
m_imageHeightInPixels), cv::Range(0, m_imageWidthInPixels));1333cv::Canny(InputGreyScale, OutputGreyScale, 80, 90);13341335} break;1336case Sobel:1337{1338OutputFrame.setTo(cv::Scalar(128));1339cv::Mat OutputGreyScale(OutputFrame, cv::Range(0, m_imageHeightInPixels), cv::Range(0, m_imageWidthInPixels));1340cv::Sobel(InputGreyScale, OutputGreyScale, CV_8U, 1, 1);1341} break;1342case Histogram:1343{1344const int mHistSizeNum = 25;1345const int channels[3][1] = {{0}, {1}, {2}};1346const int mHistSize[] = {25};1347const float baseRabge[] = {0.f,256.f};1348const float* ranges[] = {baseRabge};13491350const cv::Scalar mColorsY[] = { cv::Scalar(76), cv::Scalar(149), cv::Scalar(29) };1351const cv::Scalar mColorsUV[] = { cv::Scalar(84, 255), cv::Scalar(43, 21), cv::Scalar(255, 107) };13521353cv::Mat OutputY(m_imageHeightInPixels, m_imageWidthInPixels, CV_8UC1, pDest, lDestStride);1354cv::Mat OutputUV(m_imageHeightInPixels/2, m_imageWidthInPixels/2,1355CV_8UC2, pDest+m_imageHeightInPixels*lDestStride, lDestStride);1356cv::Mat BgrFrame;13571358InputFrame.copyTo(OutputFrame);13591360cv::cvtColor(InputFrame, BgrFrame, cv::COLOR_YUV420sp2BGR);1361int thikness = (int) (BgrFrame.cols / (mHistSizeNum + 10) / 5);1362if(thikness > 5) thikness = 5;1363int offset = (int) ((BgrFrame.cols - (5*mHistSizeNum + 4*10)*thikness)/2);13641365// RGB1366for (int c=0; c<3; c++)1367{1368cv::Mat hist;1369cv::calcHist(&BgrFrame, 1, channels[c], cv::Mat(), hist, 1, mHistSize, ranges);1370cv::normalize(hist, hist, BgrFrame.rows/2, 0, cv::NORM_INF);1371for(int h=0; h<mHistSizeNum; h++) {1372cv::Point mP1, mP2;1373// Draw on Y plane1374mP1.x = mP2.x = offset + (c * (mHistSizeNum + 10) + h) * thikness;1375mP1.y = BgrFrame.rows-1;1376mP2.y = mP1.y - 2 - (int)hist.at<float>(h);1377cv::line(OutputY, mP1, mP2, mColorsY[c], thikness);13781379// Draw on UV planes1380mP1.x /= 2;1381mP1.y /= 2;1382mP2.x /= 2;1383mP2.y /= 2;1384cv::line(OutputUV, mP1, mP2, mColorsUV[c], thikness/2);1385}1386}1387} 
break;1388default:1389break;1390}13911392// Set the data size on the output buffer.1393hr = pOut->SetCurrentLength(m_cbImageSize);13941395return hr;1396}139713981399// Flush the MFT.14001401HRESULT OcvImageManipulations::OnFlush()1402{1403// For this MFT, flushing just means releasing the input sample.1404SafeRelease(&m_pSample);1405return S_OK;1406}140714081409// Update the format information. This method is called whenever the1410// input type is set.14111412HRESULT OcvImageManipulations::UpdateFormatInfo()1413{1414HRESULT hr = S_OK;14151416GUID subtype = GUID_NULL;14171418m_imageWidthInPixels = 0;1419m_imageHeightInPixels = 0;1420m_cbImageSize = 0;14211422if (m_pInputType != NULL)1423{1424hr = m_pInputType->GetGUID(MF_MT_SUBTYPE, &subtype);1425if (FAILED(hr))1426{1427goto done;1428}1429if (subtype != MFVideoFormat_NV12)1430{1431hr = E_UNEXPECTED;1432goto done;1433}14341435hr = MFGetAttributeSize(m_pInputType, MF_MT_FRAME_SIZE, &m_imageWidthInPixels, &m_imageHeightInPixels);1436if (FAILED(hr))1437{1438goto done;1439}14401441// Calculate the image size for YUV NV12 image(not including padding)1442m_cbImageSize = (m_imageHeightInPixels + m_imageHeightInPixels/2)*m_imageWidthInPixels;1443}14441445done:1446return hr;1447}144814491450// Get the default stride for a video format.1451HRESULT GetDefaultStride(IMFMediaType *pType, LONG *plStride)1452{1453LONG lStride = 0;14541455// Try to get the default stride from the media type.1456HRESULT hr = pType->GetUINT32(MF_MT_DEFAULT_STRIDE, (UINT32*)&lStride);1457if (FAILED(hr))1458{1459// Attribute not set. 
Try to calculate the default stride.1460GUID subtype = GUID_NULL;14611462UINT32 width = 0;1463UINT32 height = 0;14641465// Get the subtype and the image size.1466hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);1467if (SUCCEEDED(hr))1468{1469hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);1470}1471if (SUCCEEDED(hr))1472{1473if (subtype == MFVideoFormat_NV12)1474{1475lStride = width;1476}1477else if (subtype == MFVideoFormat_YUY2 || subtype == MFVideoFormat_UYVY)1478{1479lStride = ((width * 2) + 3) & ~3;1480}1481else1482{1483hr = E_INVALIDARG;1484}1485}14861487// Set the attribute for later reference.1488if (SUCCEEDED(hr))1489{1490(void)pType->SetUINT32(MF_MT_DEFAULT_STRIDE, UINT32(lStride));1491}1492}1493if (SUCCEEDED(hr))1494{1495*plStride = lStride;1496}1497return hr;1498}149915001501