Joe Verbout
/
main
opencv on mbed
Embed:
(wiki syntax)
Show/hide line numbers
photo.hpp
00001 /*M/////////////////////////////////////////////////////////////////////////////////////// 00002 // 00003 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 00004 // 00005 // By downloading, copying, installing or using the software you agree to this license. 00006 // If you do not agree to this license, do not download, install, 00007 // copy or use the software. 00008 // 00009 // 00010 // License Agreement 00011 // For Open Source Computer Vision Library 00012 // 00013 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 00014 // Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved. 00015 // Third party copyrights are property of their respective owners. 00016 // 00017 // Redistribution and use in source and binary forms, with or without modification, 00018 // are permitted provided that the following conditions are met: 00019 // 00020 // * Redistribution's of source code must retain the above copyright notice, 00021 // this list of conditions and the following disclaimer. 00022 // 00023 // * Redistribution's in binary form must reproduce the above copyright notice, 00024 // this list of conditions and the following disclaimer in the documentation 00025 // and/or other materials provided with the distribution. 00026 // 00027 // * The name of the copyright holders may not be used to endorse or promote products 00028 // derived from this software without specific prior written permission. 00029 // 00030 // This software is provided by the copyright holders and contributors "as is" and 00031 // any express or implied warranties, including, but not limited to, the implied 00032 // warranties of merchantability and fitness for a particular purpose are disclaimed. 
00033 // In no event shall the Intel Corporation or contributors be liable for any direct, 00034 // indirect, incidental, special, exemplary, or consequential damages 00035 // (including, but not limited to, procurement of substitute goods or services; 00036 // loss of use, data, or profits; or business interruption) however caused 00037 // and on any theory of liability, whether in contract, strict liability, 00038 // or tort (including negligence or otherwise) arising in any way out of 00039 // the use of this software, even if advised of the possibility of such damage. 00040 // 00041 //M*/ 00042 00043 #ifndef __OPENCV_PHOTO_HPP__ 00044 #define __OPENCV_PHOTO_HPP__ 00045 00046 #include "opencv2/core.hpp" 00047 #include "opencv2/imgproc.hpp" 00048 00049 /** 00050 @defgroup photo Computational Photography 00051 @{ 00052 @defgroup photo_denoise Denoising 00053 @defgroup photo_hdr HDR imaging 00054 00055 This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment, 00056 camera calibration with multiple exposures and exposure fusion. 00057 00058 @defgroup photo_clone Seamless Cloning 00059 @defgroup photo_render Non-Photorealistic Rendering 00060 @defgroup photo_c C API 00061 @} 00062 */ 00063 00064 namespace cv 00065 { 00066 00067 //! @addtogroup photo 00068 //! @{ 00069 00070 //! the inpainting algorithm 00071 enum 00072 { 00073 INPAINT_NS = 0, // Navier-Stokes algorithm 00074 INPAINT_TELEA = 1 // A. Telea algorithm 00075 }; 00076 00077 enum 00078 { 00079 NORMAL_CLONE = 1, 00080 MIXED_CLONE = 2, 00081 MONOCHROME_TRANSFER = 3 00082 }; 00083 00084 enum 00085 { 00086 RECURS_FILTER = 1, 00087 NORMCONV_FILTER = 2 00088 }; 00089 00090 /** @brief Restores the selected region in an image using the region neighborhood. 00091 00092 @param src Input 8-bit 1-channel or 3-channel image. 00093 @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that 00094 needs to be inpainted. 
00095 @param dst Output image with the same size and type as src . 00096 @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered 00097 by the algorithm. 00098 @param flags Inpainting method that could be one of the following: 00099 - **INPAINT_NS** Navier-Stokes based method [Navier01] 00100 - **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 . 00101 00102 The function reconstructs the selected image area from the pixel near the area boundary. The 00103 function may be used to remove dust and scratches from a scanned photo, or to remove undesirable 00104 objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details. 00105 00106 @note 00107 - An example using the inpainting technique can be found at 00108 opencv_source_code/samples/cpp/inpaint.cpp 00109 - (Python) An example using the inpainting technique can be found at 00110 opencv_source_code/samples/python/inpaint.py 00111 */ 00112 CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask, 00113 OutputArray dst, double inpaintRadius, int flags ); 00114 00115 //! @addtogroup photo_denoise 00116 //! @{ 00117 00118 /** @brief Perform image denoising using Non-local Means Denoising algorithm 00119 <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 00120 optimizations. Noise expected to be a gaussian white noise 00121 00122 @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. 00123 @param dst Output image with the same size and type as src . 00124 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00125 Should be odd. Recommended value 7 pixels 00126 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00127 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00128 denoising time. 
Recommended value 21 pixels 00129 @param h Parameter regulating filter strength. Big h value perfectly removes noise but also 00130 removes image details, smaller h value preserves details but also preserves some noise 00131 00132 This function expected to be applied to grayscale images. For colored images look at 00133 fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 00134 image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 00135 image to CIELAB colorspace and then separately denoise L and AB components with different h 00136 parameter. 00137 */ 00138 CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3, 00139 int templateWindowSize = 7, int searchWindowSize = 21); 00140 00141 /** @brief Perform image denoising using Non-local Means Denoising algorithm 00142 <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 00143 optimizations. Noise expected to be a gaussian white noise 00144 00145 @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 00146 2-channel, 3-channel or 4-channel image. 00147 @param dst Output image with the same size and type as src . 00148 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00149 Should be odd. Recommended value 7 pixels 00150 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00151 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00152 denoising time. Recommended value 21 pixels 00153 @param h Array of parameters regulating filter strength, either one 00154 parameter applied to all channels or one per channel in dst. Big h value 00155 perfectly removes noise but also removes image details, smaller h 00156 value preserves details but also preserves some noise 00157 @param normType Type of norm used for weight calculation. 
Can be either NORM_L2 or NORM_L1 00158 00159 This function expected to be applied to grayscale images. For colored images look at 00160 fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 00161 image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 00162 image to CIELAB colorspace and then separately denoise L and AB components with different h 00163 parameter. 00164 */ 00165 CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, 00166 const std::vector<float>& h, 00167 int templateWindowSize = 7, int searchWindowSize = 21, 00168 int normType = NORM_L2); 00169 00170 /** @brief Modification of fastNlMeansDenoising function for colored images 00171 00172 @param src Input 8-bit 3-channel image. 00173 @param dst Output image with the same size and type as src . 00174 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00175 Should be odd. Recommended value 7 pixels 00176 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00177 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00178 denoising time. Recommended value 21 pixels 00179 @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 00180 removes noise but also removes image details, smaller h value preserves details but also preserves 00181 some noise 00182 @param hColor The same as h but for color components. For most images value equals 10 00183 will be enough to remove colored noise and do not distort colors 00184 00185 The function converts image to CIELAB colorspace and then separately denoise L and AB components 00186 with given h parameters using fastNlMeansDenoising function. 
00187 */ 00188 CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst, 00189 float h = 3, float hColor = 3, 00190 int templateWindowSize = 7, int searchWindowSize = 21); 00191 00192 /** @brief Modification of fastNlMeansDenoising function for images sequence where consequtive images have been 00193 captured in small period of time. For example video. This version of the function is for grayscale 00194 images or for manual manipulation with colorspaces. For more details see 00195 <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 00196 00197 @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 00198 4-channel images sequence. All images should have the same type and 00199 size. 00200 @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 00201 @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 00202 be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 00203 imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 00204 srcImgs[imgToDenoiseIndex] image. 00205 @param dst Output image with the same size and type as srcImgs images. 00206 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00207 Should be odd. Recommended value 7 pixels 00208 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00209 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00210 denoising time. Recommended value 21 pixels 00211 @param h Parameter regulating filter strength. 
Bigger h value 00212 perfectly removes noise but also removes image details, smaller h 00213 value preserves details but also preserves some noise 00214 */ 00215 CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst, 00216 int imgToDenoiseIndex, int temporalWindowSize, 00217 float h = 3, int templateWindowSize = 7, int searchWindowSize = 21); 00218 00219 /** @brief Modification of fastNlMeansDenoising function for images sequence where consequtive images have been 00220 captured in small period of time. For example video. This version of the function is for grayscale 00221 images or for manual manipulation with colorspaces. For more details see 00222 <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 00223 00224 @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 00225 2-channel, 3-channel or 4-channel images sequence. All images should 00226 have the same type and size. 00227 @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 00228 @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 00229 be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 00230 imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 00231 srcImgs[imgToDenoiseIndex] image. 00232 @param dst Output image with the same size and type as srcImgs images. 00233 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00234 Should be odd. Recommended value 7 pixels 00235 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00236 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00237 denoising time. Recommended value 21 pixels 00238 @param h Array of parameters regulating filter strength, either one 00239 parameter applied to all channels or one per channel in dst. 
Big h value 00240 perfectly removes noise but also removes image details, smaller h 00241 value preserves details but also preserves some noise 00242 @param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1 00243 */ 00244 CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst, 00245 int imgToDenoiseIndex, int temporalWindowSize, 00246 const std::vector<float>& h, 00247 int templateWindowSize = 7, int searchWindowSize = 21, 00248 int normType = NORM_L2); 00249 00250 /** @brief Modification of fastNlMeansDenoisingMulti function for colored images sequences 00251 00252 @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 00253 size. 00254 @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 00255 @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 00256 be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 00257 imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 00258 srcImgs[imgToDenoiseIndex] image. 00259 @param dst Output image with the same size and type as srcImgs images. 00260 @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 00261 Should be odd. Recommended value 7 pixels 00262 @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 00263 given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 00264 denoising time. Recommended value 21 pixels 00265 @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 00266 removes noise but also removes image details, smaller h value preserves details but also preserves 00267 some noise. 00268 @param hColor The same as h but for color components. 
00269 00270 The function converts images to CIELAB colorspace and then separately denoise L and AB components 00271 with given h parameters using fastNlMeansDenoisingMulti function. 00272 */ 00273 CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst, 00274 int imgToDenoiseIndex, int temporalWindowSize, 00275 float h = 3, float hColor = 3, 00276 int templateWindowSize = 7, int searchWindowSize = 21); 00277 00278 /** @brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is, 00279 finding a function to minimize some functional). As the image denoising, in particular, may be seen 00280 as the variational problem, primal-dual algorithm then can be used to perform denoising and this is 00281 exactly what is implemented. 00282 00283 It should be noted, that this implementation was taken from the July 2013 blog entry 00284 @cite MA13 , which also contained (slightly more general) ready-to-use source code on Python. 00285 Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end 00286 of July 2013 and finally it was slightly adapted by later authors. 00287 00288 Although the thorough discussion and justification of the algorithm involved may be found in 00289 @cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin 00290 with, we consider the 1-byte gray-level images as the functions from the rectangular domain of 00291 pixels (it may be seen as set 00292 \f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some 00293 \f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. 
We shall denote the noised images as \f$f_i\f$ and with 00294 this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula 00295 00296 \f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f] 00297 00298 \f$\|\|\cdot\|\|\f$ here denotes \f$L_2\f$-norm and as you see, the first addend states that we want our 00299 image to be smooth (ideally, having zero gradient, thus being constant) and the second states that 00300 we want our result to be close to the observations we've got. If we treat \f$x\f$ as a function, this is 00301 exactly the functional what we seek to minimize and here the Primal-Dual algorithm comes into play. 00302 00303 @param observations This array should contain one or more noised versions of the image that is to 00304 be restored. 00305 @param result Here the denoised image will be stored. There is no need to do pre-allocation of 00306 storage space, as it will be automatically allocated, if necessary. 00307 @param lambda Corresponds to \f$\lambda\f$ in the formulas above. As it is enlarged, the smooth 00308 (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly 00309 speaking, as it becomes smaller, the result will be more blur but more sever outliers will be 00310 removed. 00311 @param niters Number of iterations that the algorithm will run. Of course, as more iterations as 00312 better, but it is hard to quantitatively refine this statement, so just use the default and 00313 increase it if the results are poor. 00314 */ 00315 CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda=1.0, int niters=30); 00316 00317 //! @} photo_denoise 00318 00319 //! @addtogroup photo_hdr 00320 //! @{ 00321 00322 enum { LDR_SIZE = 256 }; 00323 00324 /** @brief Base class for tonemapping algorithms - tools that are used to map HDR image to 8-bit range. 
00325 */ 00326 class CV_EXPORTS_W Tonemap : public Algorithm 00327 { 00328 public: 00329 /** @brief Tonemaps image 00330 00331 @param src source image - 32-bit 3-channel Mat 00332 @param dst destination image - 32-bit 3-channel Mat with values in [0, 1] range 00333 */ 00334 CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0; 00335 00336 CV_WRAP virtual float getGamma() const = 0; 00337 CV_WRAP virtual void setGamma(float gamma) = 0; 00338 }; 00339 00340 /** @brief Creates simple linear mapper with gamma correction 00341 00342 @param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma 00343 equal to 2.2f is suitable for most displays. 00344 Generally gamma > 1 brightens the image and gamma < 1 darkens it. 00345 */ 00346 CV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f); 00347 00348 /** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in 00349 logarithmic domain. 00350 00351 Since it's a global operator the same function is applied to all the pixels, it is controlled by the 00352 bias parameter. 00353 00354 Optional saturation enhancement is possible as described in @cite FL02 . 00355 00356 For more information see @cite DM03 . 00357 */ 00358 class CV_EXPORTS_W TonemapDrago : public Tonemap 00359 { 00360 public: 00361 00362 CV_WRAP virtual float getSaturation() const = 0; 00363 CV_WRAP virtual void setSaturation(float saturation) = 0; 00364 00365 CV_WRAP virtual float getBias() const = 0; 00366 CV_WRAP virtual void setBias(float bias) = 0; 00367 }; 00368 00369 /** @brief Creates TonemapDrago object 00370 00371 @param gamma gamma value for gamma correction. See createTonemap 00372 @param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater 00373 than 1 increase saturation and values less than 1 decrease it. 00374 @param bias value for bias function in [0, 1] range. 
Values from 0.7 to 0.9 usually give best 00375 results, default value is 0.85. 00376 */ 00377 CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f); 00378 00379 /** @brief This algorithm decomposes image into two layers: base layer and detail layer using bilateral filter 00380 and compresses contrast of the base layer thus preserving all the details. 00381 00382 This implementation uses regular bilateral filter from opencv. 00383 00384 Saturation enhancement is possible as in ocvTonemapDrago. 00385 00386 For more information see @cite DD02 . 00387 */ 00388 class CV_EXPORTS_W TonemapDurand : public Tonemap 00389 { 00390 public: 00391 00392 CV_WRAP virtual float getSaturation() const = 0; 00393 CV_WRAP virtual void setSaturation(float saturation) = 0; 00394 00395 CV_WRAP virtual float getContrast() const = 0; 00396 CV_WRAP virtual void setContrast(float contrast) = 0; 00397 00398 CV_WRAP virtual float getSigmaSpace() const = 0; 00399 CV_WRAP virtual void setSigmaSpace(float sigma_space) = 0; 00400 00401 CV_WRAP virtual float getSigmaColor() const = 0; 00402 CV_WRAP virtual void setSigmaColor(float sigma_color) = 0; 00403 }; 00404 00405 /** @brief Creates TonemapDurand object 00406 00407 @param gamma gamma value for gamma correction. See createTonemap 00408 @param contrast resulting contrast on logarithmic scale, i. e. log(max / min), where max and min 00409 are maximum and minimum luminance values of the resulting image. 00410 @param saturation saturation enhancement value. See createTonemapDrago 00411 @param sigma_space bilateral filter sigma in color space 00412 @param sigma_color bilateral filter sigma in coordinate space 00413 */ 00414 CV_EXPORTS_W Ptr<TonemapDurand> 00415 createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f); 00416 00417 /** @brief This is a global tonemapping operator that models human visual system. 
00418 00419 Mapping function is controlled by adaptation parameter, that is computed using light adaptation and 00420 color adaptation. 00421 00422 For more information see @cite RD05 . 00423 */ 00424 class CV_EXPORTS_W TonemapReinhard : public Tonemap 00425 { 00426 public: 00427 CV_WRAP virtual float getIntensity() const = 0; 00428 CV_WRAP virtual void setIntensity(float intensity) = 0; 00429 00430 CV_WRAP virtual float getLightAdaptation() const = 0; 00431 CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0; 00432 00433 CV_WRAP virtual float getColorAdaptation() const = 0; 00434 CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0; 00435 }; 00436 00437 /** @brief Creates TonemapReinhard object 00438 00439 @param gamma gamma value for gamma correction. See createTonemap 00440 @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results. 00441 @param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel 00442 value, if 0 it's global, otherwise it's a weighted mean of this two cases. 00443 @param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently, 00444 if 0 adaptation level is the same for each channel. 00445 */ 00446 CV_EXPORTS_W Ptr<TonemapReinhard> 00447 createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f); 00448 00449 /** @brief This algorithm transforms image to contrast using gradients on all levels of gaussian pyramid, 00450 transforms contrast values to HVS response and scales the response. After this the image is 00451 reconstructed from new contrast values. 00452 00453 For more information see @cite MM06 . 
00454 */ 00455 class CV_EXPORTS_W TonemapMantiuk : public Tonemap 00456 { 00457 public: 00458 CV_WRAP virtual float getScale() const = 0; 00459 CV_WRAP virtual void setScale(float scale) = 0; 00460 00461 CV_WRAP virtual float getSaturation() const = 0; 00462 CV_WRAP virtual void setSaturation(float saturation) = 0; 00463 }; 00464 00465 /** @brief Creates TonemapMantiuk object 00466 00467 @param gamma gamma value for gamma correction. See createTonemap 00468 @param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing 00469 dynamic range. Values from 0.6 to 0.9 produce best results. 00470 @param saturation saturation enhancement value. See createTonemapDrago 00471 */ 00472 CV_EXPORTS_W Ptr<TonemapMantiuk> 00473 createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f); 00474 00475 /** @brief The base class for algorithms that align images of the same scene with different exposures 00476 */ 00477 class CV_EXPORTS_W AlignExposures : public Algorithm 00478 { 00479 public: 00480 /** @brief Aligns images 00481 00482 @param src vector of input images 00483 @param dst vector of aligned images 00484 @param times vector of exposure time values for each image 00485 @param response 256x1 matrix with inverse camera response function for each pixel value, it should 00486 have the same number of channels as images. 00487 */ 00488 CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst, 00489 InputArray times, InputArray response) = 0; 00490 }; 00491 00492 /** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median 00493 luminance and 0 otherwise) and than aligns the resulting bitmaps using bit operations. 00494 00495 It is invariant to exposure, so exposure values and camera response are not necessary. 00496 00497 In this implementation new image regions are filled with zeros. 00498 00499 For more information see @cite GW03 . 
00500 */ 00501 class CV_EXPORTS_W AlignMTB : public AlignExposures 00502 { 00503 public: 00504 CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst, 00505 InputArray times, InputArray response) = 0; 00506 00507 /** @brief Short version of process, that doesn't take extra arguments. 00508 00509 @param src vector of input images 00510 @param dst vector of aligned images 00511 */ 00512 CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0; 00513 00514 /** @brief Calculates shift between two images, i. e. how to shift the second image to correspond it with the 00515 first. 00516 00517 @param img0 first image 00518 @param img1 second image 00519 */ 00520 CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0; 00521 /** @brief Helper function, that shift Mat filling new regions with zeros. 00522 00523 @param src input image 00524 @param dst result image 00525 @param shift shift value 00526 */ 00527 CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0; 00528 /** @brief Computes median threshold and exclude bitmaps of given image. 00529 00530 @param img input image 00531 @param tb median threshold bitmap 00532 @param eb exclude bitmap 00533 */ 00534 CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0; 00535 00536 CV_WRAP virtual int getMaxBits() const = 0; 00537 CV_WRAP virtual void setMaxBits(int max_bits) = 0; 00538 00539 CV_WRAP virtual int getExcludeRange() const = 0; 00540 CV_WRAP virtual void setExcludeRange(int exclude_range) = 0; 00541 00542 CV_WRAP virtual bool getCut() const = 0; 00543 CV_WRAP virtual void setCut(bool value) = 0; 00544 }; 00545 00546 /** @brief Creates AlignMTB object 00547 00548 @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are 00549 usually good enough (31 and 63 pixels shift respectively). 
00550 @param exclude_range range for exclusion bitmap that is constructed to suppress noise around the 00551 median value. 00552 @param cut if true cuts images, otherwise fills the new regions with zeros. 00553 */ 00554 CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true); 00555 00556 /** @brief The base class for camera response calibration algorithms. 00557 */ 00558 class CV_EXPORTS_W CalibrateCRF : public Algorithm 00559 { 00560 public: 00561 /** @brief Recovers inverse camera response. 00562 00563 @param src vector of input images 00564 @param dst 256x1 matrix with inverse camera response function 00565 @param times vector of exposure time values for each image 00566 */ 00567 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; 00568 }; 00569 00570 /** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective 00571 function as linear system. Objective function is constructed using pixel values on the same position 00572 in all images, extra term is added to make the result smoother. 00573 00574 For more information see @cite DM97 . 00575 */ 00576 class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF 00577 { 00578 public: 00579 CV_WRAP virtual float getLambda() const = 0; 00580 CV_WRAP virtual void setLambda(float lambda) = 0; 00581 00582 CV_WRAP virtual int getSamples() const = 0; 00583 CV_WRAP virtual void setSamples(int samples) = 0; 00584 00585 CV_WRAP virtual bool getRandom() const = 0; 00586 CV_WRAP virtual void setRandom(bool random) = 0; 00587 }; 00588 00589 /** @brief Creates CalibrateDebevec object 00590 00591 @param samples number of pixel locations to use 00592 @param lambda smoothness term weight. Greater values produce smoother results, but can alter the 00593 response. 00594 @param random if true sample pixel locations are chosen at random, otherwise the form a 00595 rectangular grid. 
00596 */ 00597 CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false); 00598 00599 /** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective 00600 function as linear system. This algorithm uses all image pixels. 00601 00602 For more information see @cite RB99 . 00603 */ 00604 class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF 00605 { 00606 public: 00607 CV_WRAP virtual int getMaxIter() const = 0; 00608 CV_WRAP virtual void setMaxIter(int max_iter) = 0; 00609 00610 CV_WRAP virtual float getThreshold() const = 0; 00611 CV_WRAP virtual void setThreshold(float threshold) = 0; 00612 00613 CV_WRAP virtual Mat getRadiance() const = 0; 00614 }; 00615 00616 /** @brief Creates CalibrateRobertson object 00617 00618 @param max_iter maximal number of Gauss-Seidel solver iterations. 00619 @param threshold target difference between results of two successive steps of the minimization. 00620 */ 00621 CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f); 00622 00623 /** @brief The base class algorithms that can merge exposure sequence to a single image. 00624 */ 00625 class CV_EXPORTS_W MergeExposures : public Algorithm 00626 { 00627 public: 00628 /** @brief Merges images. 00629 00630 @param src vector of input images 00631 @param dst result image 00632 @param times vector of exposure time values for each image 00633 @param response 256x1 matrix with inverse camera response function for each pixel value, it should 00634 have the same number of channels as images. 00635 */ 00636 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, 00637 InputArray times, InputArray response) = 0; 00638 }; 00639 00640 /** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure 00641 values and camera response. 00642 00643 For more information see @cite DM97 . 
00644 */ 00645 class CV_EXPORTS_W MergeDebevec : public MergeExposures 00646 { 00647 public: 00648 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, 00649 InputArray times, InputArray response) = 0; 00650 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; 00651 }; 00652 00653 /** @brief Creates MergeDebevec object 00654 */ 00655 CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec(); 00656 00657 /** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, than images are 00658 combined using laplacian pyramids. 00659 00660 The resulting image weight is constructed as weighted average of contrast, saturation and 00661 well-exposedness measures. 00662 00663 The resulting image doesn't require tonemapping and can be converted to 8-bit image by multiplying 00664 by 255, but it's recommended to apply gamma correction and/or linear tonemapping. 00665 00666 For more information see @cite MK07 . 00667 */ 00668 class CV_EXPORTS_W MergeMertens : public MergeExposures 00669 { 00670 public: 00671 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, 00672 InputArray times, InputArray response) = 0; 00673 /** @brief Short version of process, that doesn't take extra arguments. 00674 00675 @param src vector of input images 00676 @param dst result image 00677 */ 00678 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0; 00679 00680 CV_WRAP virtual float getContrastWeight() const = 0; 00681 CV_WRAP virtual void setContrastWeight(float contrast_weiht) = 0; 00682 00683 CV_WRAP virtual float getSaturationWeight() const = 0; 00684 CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0; 00685 00686 CV_WRAP virtual float getExposureWeight() const = 0; 00687 CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0; 00688 }; 00689 00690 /** @brief Creates MergeMertens object 00691 00692 @param contrast_weight contrast measure weight. 
See MergeMertens. 00693 @param saturation_weight saturation measure weight 00694 @param exposure_weight well-exposedness measure weight 00695 */ 00696 CV_EXPORTS_W Ptr<MergeMertens> 00697 createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f); 00698 00699 /** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure 00700 values and camera response. 00701 00702 For more information see @cite RB99 . 00703 */ 00704 class CV_EXPORTS_W MergeRobertson : public MergeExposures 00705 { 00706 public: 00707 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, 00708 InputArray times, InputArray response) = 0; 00709 CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; 00710 }; 00711 00712 /** @brief Creates MergeRobertson object 00713 */ 00714 CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson(); 00715 00716 //! @} photo_hdr 00717 00718 /** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized 00719 black-and-white photograph rendering, and in many single channel image processing applications 00720 @cite CL12 . 00721 00722 @param src Input 8-bit 3-channel image. 00723 @param grayscale Output 8-bit 1-channel image. 00724 @param color_boost Output 8-bit 3-channel image. 00725 00726 This function is to be applied on color images. 00727 */ 00728 CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost); 00729 00730 //! @addtogroup photo_clone 00731 //! @{ 00732 00733 /** @brief Image editing tasks concern either global changes (color/intensity corrections, filters, 00734 deformations) or local changes concerned to a selection. Here we are interested in achieving local 00735 changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless 00736 manner. 
The extent of the changes ranges from slight distortions to complete replacement by novel 00737 content @cite PM03 . 00738 00739 @param src Input 8-bit 3-channel image. 00740 @param dst Input 8-bit 3-channel image. 00741 @param mask Input 8-bit 1 or 3-channel image. 00742 @param p Point in dst image where object is placed. 00743 @param blend Output image with the same size and type as dst. 00744 @param flags Cloning method that could be one of the following: 00745 - **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with 00746 complex outlines into a new background 00747 - **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time 00748 consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the 00749 original image, is not effective. Mixed seamless cloning based on a loose selection proves 00750 effective. 00751 - **FEATURE_EXCHANGE** Feature exchange allows the user to easily replace certain features of 00752 one object by alternative features. 00753 */ 00754 CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p, 00755 OutputArray blend, int flags); 00756 00757 /** @brief Given an original color image, two differently colored versions of this image can be mixed 00758 seamlessly. 00759 00760 @param src Input 8-bit 3-channel image. 00761 @param mask Input 8-bit 1 or 3-channel image. 00762 @param dst Output image with the same size and type as src . 00763 @param red_mul R-channel multiply factor. 00764 @param green_mul G-channel multiply factor. 00765 @param blue_mul B-channel multiply factor. 00766 00767 Multiplication factor is between .5 to 2.5. 
00768 */ 00769 CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f, 00770 float green_mul = 1.0f, float blue_mul = 1.0f); 00771 00772 /** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and 00773 then integrating back with a Poisson solver, modifies locally the apparent illumination of an image. 00774 00775 @param src Input 8-bit 3-channel image. 00776 @param mask Input 8-bit 1 or 3-channel image. 00777 @param dst Output image with the same size and type as src. 00778 @param alpha Value ranges between 0-2. 00779 @param beta Value ranges between 0-2. 00780 00781 This is useful to highlight under-exposed foreground objects or to reduce specular reflections. 00782 */ 00783 CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst, 00784 float alpha = 0.2f, float beta = 0.4f); 00785 00786 /** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one 00787 washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge 00788 Detector is used. 00789 00790 @param src Input 8-bit 3-channel image. 00791 @param mask Input 8-bit 1 or 3-channel image. 00792 @param dst Output image with the same size and type as src. 00793 @param low_threshold Range from 0 to 100. 00794 @param high_threshold Value > 100. 00795 @param kernel_size The size of the Sobel kernel to be used. 00796 00797 **NOTE:** 00798 00799 The algorithm assumes that the color of the source image is close to that of the destination. This 00800 assumption means that when the colors don't match, the source image color gets tinted toward the 00801 color of the destination image. 00802 */ 00803 CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst, 00804 float low_threshold = 30, float high_threshold = 45, 00805 int kernel_size = 3); 00806 00807 //! @} photo_clone 00808 00809 //! 
@addtogroup photo_render 00810 //! @{ 00811 00812 /** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing 00813 filters are used in many different applications @cite EM11 . 00814 00815 @param src Input 8-bit 3-channel image. 00816 @param dst Output 8-bit 3-channel image. 00817 @param flags Edge preserving filters: 00818 - **RECURS_FILTER** = 1 00819 - **NORMCONV_FILTER** = 2 00820 @param sigma_s Range between 0 to 200. 00821 @param sigma_r Range between 0 to 1. 00822 */ 00823 CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1, 00824 float sigma_s = 60, float sigma_r = 0.4f); 00825 00826 /** @brief This filter enhances the details of a particular image. 00827 00828 @param src Input 8-bit 3-channel image. 00829 @param dst Output image with the same size and type as src. 00830 @param sigma_s Range between 0 to 200. 00831 @param sigma_r Range between 0 to 1. 00832 */ 00833 CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10, 00834 float sigma_r = 0.15f); 00835 00836 /** @brief Pencil-like non-photorealistic line drawing 00837 00838 @param src Input 8-bit 3-channel image. 00839 @param dst1 Output 8-bit 1-channel image. 00840 @param dst2 Output image with the same size and type as src. 00841 @param sigma_s Range between 0 to 200. 00842 @param sigma_r Range between 0 to 1. 00843 @param shade_factor Range between 0 to 0.1. 00844 */ 00845 CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2, 00846 float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f); 00847 00848 /** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on 00849 photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low 00850 contrast while preserving, or enhancing, high-contrast features. 00851 00852 @param src Input 8-bit 3-channel image. 
00853 @param dst Output image with the same size and type as src. 00854 @param sigma_s Range between 0 to 200. 00855 @param sigma_r Range between 0 to 1. 00856 */ 00857 CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60, 00858 float sigma_r = 0.45f); 00859 00860 //! @} photo_render 00861 00862 //! @} photo 00863 00864 } // cv 00865 00866 #ifndef DISABLE_OPENCV_24_COMPATIBILITY 00867 #include "opencv2/photo/photo_c.h" 00868 #endif 00869 00870 #endif 00871
Generated on Tue Jul 12 2022 16:42:39 by Doxygen 1.7.2