//-------------------------------------------------------------------------------------
// DirectXTexMisc.cpp
//
// DirectX Texture Library - Misc image operations
//
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
// PARTICULAR PURPOSE.
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// http://go.microsoft.com/fwlink/?LinkId=248926
//-------------------------------------------------------------------------------------

#include "directxtexp.h"

namespace DirectX
{

static const XMVECTORF32 g_Gamma22 = { 2.2f, 2.2f, 2.2f, 1.f };

//-------------------------------------------------------------------------------------
static HRESULT _ComputeMSE( _In_ const Image& image1, _In_ const Image& image2,
                            _Out_ float& mse, _Out_writes_opt_(4) float* mseV,
                            _In_ DWORD flags )
{
    if ( !image1.pixels || !image2.pixels )
        return E_POINTER;

    assert( image1.width == image2.width && image1.height == image2.height );
    assert( !IsCompressed( image1.format ) && !IsCompressed( image2.format ) );

    const size_t width = image1.width;

    ScopedAlignedArrayXMVECTOR scanline( reinterpret_cast<XMVECTOR*>( _aligned_malloc( (sizeof(XMVECTOR)*width)*2, 16 ) ) );
    if ( !scanline )
        return E_OUTOFMEMORY;

    // Flags implied from image formats
    switch( image1.format )
    {
    case DXGI_FORMAT_B8G8R8X8_UNORM:
        flags |= CMSE_IGNORE_ALPHA;
        break;

    case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
        flags |= CMSE_IMAGE1_SRGB | CMSE_IGNORE_ALPHA;
        break;

    case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
    case DXGI_FORMAT_BC1_UNORM_SRGB:
    case DXGI_FORMAT_BC2_UNORM_SRGB:
    case DXGI_FORMAT_BC3_UNORM_SRGB:
    case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
    case DXGI_FORMAT_BC7_UNORM_SRGB:
        flags |= CMSE_IMAGE1_SRGB;
        break;
    }

    switch( image2.format )
    {
    case DXGI_FORMAT_B8G8R8X8_UNORM:
        flags |= CMSE_IGNORE_ALPHA;
        break;

    case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
        flags |= CMSE_IMAGE2_SRGB | CMSE_IGNORE_ALPHA;
        break;

    case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
    case DXGI_FORMAT_BC1_UNORM_SRGB:
    case DXGI_FORMAT_BC2_UNORM_SRGB:
    case DXGI_FORMAT_BC3_UNORM_SRGB:
    case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
    case DXGI_FORMAT_BC7_UNORM_SRGB:
        flags |= CMSE_IMAGE2_SRGB;
        break;
    }

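    // At this point 'flags' combines whatever the caller requested with the sRGB
    // decode and ignore-alpha behavior implied by the two pixel formats above.
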
    const uint8_t *pSrc1 = image1.pixels;
    const size_t rowPitch1 = image1.rowPitch;

    const uint8_t *pSrc2 = image2.pixels;
    const size_t rowPitch2 = image2.rowPitch;

    XMVECTOR acc = g_XMZero;
    static XMVECTORF32 two = { 2.0f, 2.0f, 2.0f, 2.0f };

    for( size_t h = 0; h < image1.height; ++h )
    {
        XMVECTOR* ptr1 = scanline.get();
        if ( !_LoadScanline( ptr1, width, pSrc1, rowPitch1, image1.format ) )
            return E_FAIL;

        XMVECTOR* ptr2 = scanline.get() + width;
        if ( !_LoadScanline( ptr2, width, pSrc2, rowPitch2, image2.format ) )
            return E_FAIL;

        for( size_t i = 0; i < width; ++i )
        {
            XMVECTOR v1 = *(ptr1++);
            if ( flags & CMSE_IMAGE1_SRGB )
            {
                v1 = XMVectorPow( v1, g_Gamma22 );
            }
            if ( flags & CMSE_IMAGE1_X2_BIAS )
            {
                v1 = XMVectorMultiplyAdd( v1, two, g_XMNegativeOne );
            }

            XMVECTOR v2 = *(ptr2++);
            if ( flags & CMSE_IMAGE2_SRGB )
            {
                v2 = XMVectorPow( v2, g_Gamma22 );
            }
            if ( flags & CMSE_IMAGE2_X2_BIAS )
            {
                // Apply the x2 bias to the second image's value (mirrors the
                // CMSE_IMAGE1_X2_BIAS branch above, which writes back to v1)
                v2 = XMVectorMultiplyAdd( v2, two, g_XMNegativeOne );
            }

            // sum[ (I1 - I2)^2 ]
            XMVECTOR v = XMVectorSubtract( v1, v2 );
            if ( flags & CMSE_IGNORE_RED )
            {
                v = XMVectorSelect( v, g_XMZero, g_XMMaskX );
            }
            if ( flags & CMSE_IGNORE_GREEN )
            {
                v = XMVectorSelect( v, g_XMZero, g_XMMaskY );
            }
            if ( flags & CMSE_IGNORE_BLUE )
            {
                v = XMVectorSelect( v, g_XMZero, g_XMMaskZ );
            }
            if ( flags & CMSE_IGNORE_ALPHA )
            {
                v = XMVectorSelect( v, g_XMZero, g_XMMaskW );
            }

            acc = XMVectorMultiplyAdd( v, v, acc );
        }

        pSrc1 += rowPitch1;
        pSrc2 += rowPitch2;
    }

    // MSE = sum[ (I1 - I2)^2 ] / w*h
    XMVECTOR d = XMVectorReplicate( float(image1.width * image1.height) );
    XMVECTOR v = XMVectorDivide( acc, d );

    if ( mseV )
    {
        XMStoreFloat4( reinterpret_cast<XMFLOAT4*>( mseV ), v );
        mse = mseV[0] + mseV[1] + mseV[2] + mseV[3];
    }
    else
    {
        XMFLOAT4 _mseV;
        XMStoreFloat4( &_mseV, v );
        mse = _mseV.x + _mseV.y + _mseV.z + _mseV.w;
    }

    return S_OK;
}


//=====================================================================================
// Entry points
//=====================================================================================

//-------------------------------------------------------------------------------------
// Copies a rectangle from one image into another
//-------------------------------------------------------------------------------------
_Use_decl_annotations_
HRESULT CopyRectangle( const Image& srcImage, const Rect& srcRect, const Image& dstImage, DWORD filter, size_t xOffset, size_t yOffset )
{
    if ( !srcImage.pixels || !dstImage.pixels )
        return E_POINTER;

    if ( IsCompressed( srcImage.format ) || IsCompressed( dstImage.format )
         || IsPlanar( srcImage.format ) || IsPlanar( dstImage.format )
         || IsPalettized( srcImage.format ) || IsPalettized( dstImage.format ) )
        return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );

    // Validate rectangle/offset
    if ( !srcRect.w || !srcRect.h || ( (srcRect.x + srcRect.w) > srcImage.width ) || ( (srcRect.y + srcRect.h) > srcImage.height ) )
    {
        return E_INVALIDARG;
    }

    if ( ( (xOffset + srcRect.w) > dstImage.width ) || ( (yOffset + srcRect.h) > dstImage.height ) )
    {
        return E_INVALIDARG;
    }

    // Compute source bytes-per-pixel
    size_t sbpp = BitsPerPixel( srcImage.format );
    if ( !sbpp )
        return E_FAIL;

    if ( sbpp < 8 )
    {
        // We don't support monochrome (DXGI_FORMAT_R1_UNORM)
        return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
    }

    const uint8_t* pEndSrc = srcImage.pixels + srcImage.rowPitch*srcImage.height;
    const uint8_t* pEndDest = dstImage.pixels + dstImage.rowPitch*dstImage.height;

    // Round to bytes
    sbpp = ( sbpp + 7 ) / 8;

    const uint8_t* pSrc = srcImage.pixels + (srcRect.y * srcImage.rowPitch) + (srcRect.x * sbpp);

    if ( srcImage.format == dstImage.format )
    {
        // Direct copy case (avoid intermediate conversions)
        uint8_t* pDest = dstImage.pixels + (yOffset * dstImage.rowPitch) + (xOffset * sbpp);
        const size_t copyW = srcRect.w * sbpp;
        for( size_t h=0; h < srcRect.h; ++h )
        {
            if ( ( (pSrc+copyW) > pEndSrc ) || (pDest > pEndDest) )
                return E_FAIL;

            memcpy_s( pDest, pEndDest - pDest, pSrc, copyW );

            pSrc += srcImage.rowPitch;
            pDest += dstImage.rowPitch;
        }

        return S_OK;
    }

    // Compute destination bytes-per-pixel (not the same format as source)
    size_t dbpp = BitsPerPixel( dstImage.format );
    if ( !dbpp )
        return E_FAIL;

    if ( dbpp < 8 )
    {
        // We don't support monochrome (DXGI_FORMAT_R1_UNORM)
        return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
    }

    // Round to bytes
    dbpp = ( dbpp + 7 ) / 8;

    uint8_t* pDest = dstImage.pixels + (yOffset * dstImage.rowPitch) + (xOffset * dbpp);

    ScopedAlignedArrayXMVECTOR scanline( reinterpret_cast<XMVECTOR*>( _aligned_malloc( (sizeof(XMVECTOR)*srcRect.w), 16 ) ) );
    if ( !scanline )
        return E_OUTOFMEMORY;

    const size_t copyS = srcRect.w * sbpp;
    const size_t copyD = srcRect.w * dbpp;

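    // Convert the rectangle one scanline at a time: load the source row into RGBA32F,
    // convert it to the destination format, then store it into the destination image.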
    for( size_t h=0; h < srcRect.h; ++h )
    {
        if ( ( (pSrc+copyS) > pEndSrc) || ((pDest+copyD) > pEndDest) )
            return E_FAIL;

        if ( !_LoadScanline( scanline.get(), srcRect.w, pSrc, copyS, srcImage.format ) )
            return E_FAIL;

        _ConvertScanline( scanline.get(), srcRect.w, dstImage.format, srcImage.format, filter );

        if ( !_StoreScanline( pDest, copyD, dstImage.format, scanline.get(), srcRect.w ) )
            return E_FAIL;

        pSrc += srcImage.rowPitch;
        pDest += dstImage.rowPitch;
    }

    return S_OK;
}
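
//-------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the library): copy a 64x64 block from the
// top-left of a source image into a destination image at offset (16,16). 'src' and
// 'dst' are assumed to be already-populated, non-compressed, non-planar Image structs
// obtained elsewhere (e.g. from a ScratchImage).
//
//     Rect r( 0, 0, 64, 64 );
//     HRESULT hr = CopyRectangle( src, r, dst, TEX_FILTER_DEFAULT, 16, 16 );
//     if ( FAILED(hr) )
//     {
//         // handle error (rectangle out of bounds, unsupported format, etc.)
//     }
//-------------------------------------------------------------------------------------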

//-------------------------------------------------------------------------------------
// Computes the Mean-Squared-Error (MSE) between two images
//-------------------------------------------------------------------------------------
_Use_decl_annotations_
HRESULT ComputeMSE( const Image& image1, const Image& image2, float& mse, float* mseV, DWORD flags )
{
    if ( !image1.pixels || !image2.pixels )
        return E_POINTER;

    if ( image1.width != image2.width || image1.height != image2.height )
        return E_INVALIDARG;

    if ( IsPlanar( image1.format ) || IsPlanar( image2.format )
         || IsPalettized( image1.format ) || IsPalettized( image2.format ) )
        return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );

    if ( IsCompressed(image1.format) )
    {
        if ( IsCompressed(image2.format) )
        {
            // Case 1: both images are compressed, expand to RGBA32F
            ScratchImage temp1;
            HRESULT hr = Decompress( image1, DXGI_FORMAT_R32G32B32A32_FLOAT, temp1 );
            if ( FAILED(hr) )
                return hr;

            ScratchImage temp2;
            hr = Decompress( image2, DXGI_FORMAT_R32G32B32A32_FLOAT, temp2 );
            if ( FAILED(hr) )
                return hr;

            const Image* img1 = temp1.GetImage(0,0,0);
            const Image* img2 = temp2.GetImage(0,0,0);
            if ( !img1 || !img2 )
                return E_POINTER;

            return _ComputeMSE( *img1, *img2, mse, mseV, flags );
        }
        else
        {
            // Case 2: image1 is compressed, expand to RGBA32F
            ScratchImage temp;
            HRESULT hr = Decompress( image1, DXGI_FORMAT_R32G32B32A32_FLOAT, temp );
            if ( FAILED(hr) )
                return hr;

            const Image* img = temp.GetImage(0,0,0);
            if ( !img )
                return E_POINTER;

            return _ComputeMSE( *img, image2, mse, mseV, flags );
        }
    }
    else
    {
        if ( IsCompressed(image2.format) )
        {
            // Case 3: image2 is compressed, expand to RGBA32F
            ScratchImage temp;
            HRESULT hr = Decompress( image2, DXGI_FORMAT_R32G32B32A32_FLOAT, temp );
            if ( FAILED(hr) )
                return hr;

            const Image* img = temp.GetImage(0,0,0);
            if ( !img )
                return E_POINTER;

            return _ComputeMSE( image1, *img, mse, mseV, flags );
        }
        else
        {
            // Case 4: neither image is compressed
            return _ComputeMSE( image1, image2, mse, mseV, flags );
        }
    }
}

}; // namespace
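
//-------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the library): compare an original image with
// its block-compressed version. 'original' and 'compressed' are assumed to be
// ScratchImage objects populated elsewhere; ComputeMSE decompresses as needed.
//
//     using namespace DirectX;
//
//     float mse;
//     float mseV[4];
//     HRESULT hr = ComputeMSE( *original.GetImage(0,0,0), *compressed.GetImage(0,0,0),
//                              mse, mseV, CMSE_IGNORE_ALPHA );
//     if ( SUCCEEDED(hr) )
//     {
//         // mse is the sum over channels; mseV holds the per-channel (R,G,B,A) values
//     }
//-------------------------------------------------------------------------------------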