endif()
endif()
-if(CMAKE_SYSTEM_PROCESSOR MATCHES amd64.*|x86_64.*)
+if(CMAKE_SYSTEM_PROCESSOR MATCHES amd64.*|x86_64.* OR CMAKE_GENERATOR MATCHES "Visual Studio.*Win64")
set(X86_64 1)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES i686.*|i386.*|x86.*)
set(X86 1)
if( validPixROI )
{
- icvGetRectangles( cameraMatrix, distCoeffs, 0, newCameraMatrix, imgSize, inner, outer );
+ icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
cv::Rect r = inner;
r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
*validPixROI = r;
{
Mat points = _points.getMat(), F = _Fmat.getMat();
int npoints = points.checkVector(2);
+ if( npoints < 0 )
+ npoints = points.checkVector(3);
CV_Assert( npoints >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S));
_lines.create(npoints, 1, CV_32FC3, -1, true);
~Ptr();
//! copy constructor. Copies the members and calls addref()
Ptr(const Ptr& ptr);
+ template<typename _Tp2> Ptr(const Ptr<_Tp2>& ptr);
//! copy operator. Calls ptr.addref() and release() before copying the members
Ptr& operator = (const Ptr& ptr);
//! increments the reference counter
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4714 ) //__forceinline is not inlined
#pragma warning( disable: 4127 ) //conditional expression is constant
+#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data
#endif
namespace cv
template<typename _Tp> inline bool Ptr<_Tp>::empty() const { return obj == 0; }
+template<typename _Tp> template<typename _Tp2> Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p)
+ : obj(0), refcount(0)
+{
+ if (p.empty())
+ return;
+
+ _Tp* p_casted = dynamic_cast<_Tp*>(p.obj);
+ if (!p_casted)
+ return;
+
+ obj = p_casted;
+ refcount = p.refcount;
+ addref();
+}
+
template<typename _Tp> template<typename _Tp2> inline Ptr<_Tp2> Ptr<_Tp>::ptr()
{
Ptr<_Tp2> p;
if( !obj )
return p;
+
+ _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+ if (!obj_casted)
+ return p;
+
if( refcount )
CV_XADD(refcount, 1);
- p.obj = dynamic_cast<_Tp2*>(obj);
+
+ p.obj = obj_casted;
p.refcount = refcount;
return p;
}
Ptr<_Tp2> p;
if( !obj )
return p;
+
+ _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+ if (!obj_casted)
+ return p;
+
if( refcount )
CV_XADD(refcount, 1);
- p.obj = dynamic_cast<_Tp2*>(obj);
+
+ p.obj = obj_casted;
p.refcount = refcount;
return p;
}
{
data = new AlgorithmInfoData;
data->_name = _name;
- alglist().add(_name, create);
+ if (!alglist().find(_name, create))
+ alglist().add(_name, create);
}
AlgorithmInfo::~AlgorithmInfo()
if (!v10dzero[ii] == 0.0)
throw test_excep();
}
+
+ Mat A(1, 32, CV_32F), B;
+ for( int i = 0; i < A.cols; i++ )
+ A.at<float>(i) = (float)(i <= 12 ? i : 24 - i);
+ transpose(A, B);
+
+ int minidx[2] = {0, 0}, maxidx[2] = {0, 0};
+ double minval = 0, maxval = 0;
+ minMaxIdx(A, &minval, &maxval, minidx, maxidx);
+
+ if( !(minidx[0] == 0 && minidx[1] == 31 && maxidx[0] == 0 && maxidx[1] == 12 &&
+ minval == -7 && maxval == 12))
+ throw test_excep();
+
+ minMaxIdx(B, &minval, &maxval, minidx, maxidx);
+
+ if( !(minidx[0] == 31 && minidx[1] == 0 && maxidx[0] == 12 && maxidx[1] == 0 &&
+ minval == -7 && maxval == 12))
+ throw test_excep();
}
catch(const test_excep&)
{
Mat Q(3,3,CV_32FC1);
Mat U,Vt,R,T,W;
- Dp.at<float>(0,0)=0.86483884; Dp.at<float>(0,1)= -0.3077251; Dp.at<float>(0,2)=-0.55711365;
- Dp.at<float>(1,0)=0.49294353; Dp.at<float>(1,1)=-0.24209651; Dp.at<float>(1,2)=-0.25084701;
- Dp.at<float>(2,0)=0; Dp.at<float>(2,1)=0; Dp.at<float>(2,2)=0;
+ Dp.at<float>(0,0)=0.86483884f; Dp.at<float>(0,1)= -0.3077251f; Dp.at<float>(0,2)=-0.55711365f;
+ Dp.at<float>(1,0)=0.49294353f; Dp.at<float>(1,1)=-0.24209651f; Dp.at<float>(1,2)=-0.25084701f;
+ Dp.at<float>(2,0)=0; Dp.at<float>(2,1)=0; Dp.at<float>(2,2)=0;
- Dc.at<float>(0,0)=0.75632739; Dc.at<float>(0,1)= -0.38859656; Dc.at<float>(0,2)=-0.36773083;
- Dc.at<float>(1,0)=0.9699229; Dc.at<float>(1,1)=-0.49858192; Dc.at<float>(1,2)=-0.47134098;
- Dc.at<float>(2,0)=0.10566688; Dc.at<float>(2,1)=-0.060333252; Dc.at<float>(2,2)=-0.045333147;
+ Dc.at<float>(0,0)=0.75632739f; Dc.at<float>(0,1)= -0.38859656f; Dc.at<float>(0,2)=-0.36773083f;
+ Dc.at<float>(1,0)=0.9699229f; Dc.at<float>(1,1)=-0.49858192f; Dc.at<float>(1,2)=-0.47134098f;
+ Dc.at<float>(2,0)=0.10566688f; Dc.at<float>(2,1)=-0.060333252f; Dc.at<float>(2,2)=-0.045333147f;
Q=Dp*Dc.t();
SVD decomp;
// Calculate the channels of the opponent color space
{
- // (R - G) / sqrt(2)
+ // (R - G)/sqrt(2), but converted to the destination data type
MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
MatIterator_<unsigned char> dstIt = opponentChannels[0].begin<unsigned char>();
- float factor = 1.f / sqrt(2.f);
for( ; dstIt != opponentChannels[0].end<unsigned char>(); ++rIt, ++gIt, ++dstIt )
{
- int value = static_cast<int>( static_cast<float>(static_cast<int>(*gIt)-static_cast<int>(*rIt)) * factor );
- if( value < 0 ) value = 0;
- if( value > 255 ) value = 255;
- (*dstIt) = static_cast<unsigned char>(value);
+ float value = 0.5f * (static_cast<int>(*gIt) -
+ static_cast<int>(*rIt) + 255);
+ (*dstIt) = static_cast<unsigned char>(value + 0.5f);
}
}
{
- // (R + G - 2B)/sqrt(6)
+ // (R + G - 2B)/sqrt(6), but converted to the destination data type
MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
MatConstIterator_<signed char> bIt = bgrChannels[0].begin<signed char>();
MatIterator_<unsigned char> dstIt = opponentChannels[1].begin<unsigned char>();
- float factor = 1.f / sqrt(6.f);
for( ; dstIt != opponentChannels[1].end<unsigned char>(); ++rIt, ++gIt, ++bIt, ++dstIt )
{
- int value = static_cast<int>( static_cast<float>(static_cast<int>(*rIt) + static_cast<int>(*gIt) - 2*static_cast<int>(*bIt)) *
- factor );
- if( value < 0 ) value = 0;
- if( value > 255 ) value = 255;
- (*dstIt) = static_cast<unsigned char>(value);
+ float value = 0.25f * (static_cast<int>(*rIt) + static_cast<int>(*gIt) -
+ 2*static_cast<int>(*bIt) + 510);
+ (*dstIt) = static_cast<unsigned char>(value + 0.5f);
}
}
{
- // (R + G + B)/sqrt(3)
+ // (R + G + B)/sqrt(3), but converted to the destination data type
MatConstIterator_<signed char> rIt = bgrChannels[2].begin<signed char>();
MatConstIterator_<signed char> gIt = bgrChannels[1].begin<signed char>();
MatConstIterator_<signed char> bIt = bgrChannels[0].begin<signed char>();
MatIterator_<unsigned char> dstIt = opponentChannels[2].begin<unsigned char>();
- float factor = 1.f / sqrt(3.f);
+ float factor = 1.f/3.f;
for( ; dstIt != opponentChannels[2].end<unsigned char>(); ++rIt, ++gIt, ++bIt, ++dstIt )
{
- int value = static_cast<int>( static_cast<float>(static_cast<int>(*rIt) + static_cast<int>(*gIt) + static_cast<int>(*bIt)) *
- factor );
- if( value < 0 ) value = 0;
- if( value > 255 ) value = 255;
- (*dstIt) = static_cast<unsigned char>(value);
+ float value = factor * (static_cast<int>(*rIt) +
+ static_cast<int>(*gIt) +
+ static_cast<int>(*bIt));
+ (*dstIt) = static_cast<unsigned char>(value + 0.5f);
}
}
}
// allocate descriptor memory, estimate orientations, extract descriptors
if( !extAll ) {
// extract the best comparisons only
- descriptors = cv::Mat::zeros(keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
+ descriptors = cv::Mat::zeros((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
#if CV_SSE2
__m128i* ptr= (__m128i*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
// binary: 10000000 => char: 128 or hex: 0x80
- const __m128i binMask = _mm_set_epi8(0x80, 0x80, 0x80, 0x80,
- 0x80, 0x80, 0x80, 0x80,
- 0x80, 0x80, 0x80, 0x80,
- 0x80, 0x80, 0x80, 0x80);
+ const __m128i binMask = _mm_set_epi8('\x80', '\x80', '\x80', '\x80',
+ '\x80', '\x80', '\x80', '\x80',
+ '\x80', '\x80', '\x80', '\x80',
+ '\x80', '\x80', '\x80', '\x80');
#else
std::bitset<FREAK_NB_PAIRS>* ptr = (std::bitset<FREAK_NB_PAIRS>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
#endif
}
}
else { // extract all possible comparisons for selection
- descriptors = cv::Mat::zeros(keypoints.size(), 128, CV_8U);
+ descriptors = cv::Mat::zeros((int)keypoints.size(), 128, CV_8U);
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
for( size_t k = keypoints.size(); k--; ) {
Mat descriptorsFloat = Mat::zeros(descriptors.rows, 903, CV_32F);
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(descriptors.rows-1)*descriptors.step[0]);
- for( size_t m = descriptors.rows; m--; ) {
- for( size_t n = 903; n--; ) {
+ for( int m = descriptors.rows; m--; ) {
+ for( int n = 903; n--; ) {
if( ptr->test(n) == true )
- descriptorsFloat.at<float>(m,n)=1.0;
+ descriptorsFloat.at<float>(m,n)=1.0f;
}
--ptr;
}
std::vector<PairStat> pairStat;
- for( size_t n = 903; n--; ) {
+ for( int n = 903; n--; ) {
// the higher the variance, the better --> mean = 0.5
PairStat tmp = { fabs( mean(descriptorsFloat.col(n))[0]-0.5 ) ,n};
pairStat.push_back(tmp);
test.safe_run();
}
-TEST( Features2d_Detector_MSER, regression )
+TEST( Features2d_Detector_MSER, DISABLED_regression )
{
CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
test.safe_run();
}
}
-TEST(Features2d_MSER, regression) { CV_MserTest test; test.safe_run(); }
+TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
case CV_CAP_PROP_FPS:
int fps = cvRound(value);
- if (fps != VI.getFPS(0))
+ if (fps != VI.getFPS(index))
{
VI.stopDevice(index);
VI.setIdealFramerate(index,fps);
void cvSetModeWindow_W32(const char* name, double prop_value);
void cvSetModeWindow_GTK(const char* name, double prop_value);
void cvSetModeWindow_CARBON(const char* name, double prop_value);
+void cvSetModeWindow_COCOA(const char* name, double prop_value);
double cvGetModeWindow_W32(const char* name);
double cvGetModeWindow_GTK(const char* name);
double cvGetModeWindow_CARBON(const char* name);
+double cvGetModeWindow_COCOA(const char* name);
double cvGetPropWindowAutoSize_W32(const char* name);
double cvGetPropWindowAutoSize_GTK(const char* name);
cvSetModeWindow_GTK(name,prop_value);
#elif defined (HAVE_CARBON)
cvSetModeWindow_CARBON(name,prop_value);
+ #elif defined (HAVE_COCOA)
+ cvSetModeWindow_COCOA(name,prop_value);
#endif
break;
return cvGetModeWindow_GTK(name);
#elif defined (HAVE_CARBON)
return cvGetModeWindow_CARBON(name);
+ #elif defined (HAVE_COCOA)
+ return cvGetModeWindow_COCOA(name);
#else
return -1;
#endif
CvMouseCallback mouseCallback;
void *mouseParam;
BOOL autosize;
- BOOL firstContent;
+ BOOL firstContent;
+ int status;
}
@property(assign) CvMouseCallback mouseCallback;
@property(assign) void *mouseParam;
@property(assign) BOOL autosize;
@property(assign) BOOL firstContent;
@property(retain) NSMutableDictionary *sliders;
+@property(readwrite) int status;
- (CVView *)contentView;
- (void)cvSendMouseEvent:(NSEvent *)event type:(int)type flags:(int)flags;
- (void)cvMouseEvent:(NSEvent *)event;
return returnCode;
}
+double cvGetModeWindow_COCOA( const char* name )
+{
+ double result = -1;
+ CVWindow *window = nil;
+
+ CV_FUNCNAME( "cvGetModeWindow_COCOA" );
+
+ __BEGIN__;
+ if( name == NULL )
+ {
+ CV_ERROR( CV_StsNullPtr, "NULL name string" );
+ }
+
+ window = cvGetWindow( name );
+ if ( window == NULL )
+ {
+ CV_ERROR( CV_StsNullPtr, "NULL window" );
+ }
+
+ result = window.status;
+
+ __END__;
+ return result;
+}
+
+void cvSetModeWindow_COCOA( const char* name, double prop_value )
+{
+ CVWindow *window = nil;
+ NSDictionary *fullscreenOptions = nil;
+ NSAutoreleasePool* localpool = nil;
+
+ CV_FUNCNAME( "cvSetModeWindow_COCOA" );
+
+ __BEGIN__;
+ if( name == NULL )
+ {
+ CV_ERROR( CV_StsNullPtr, "NULL name string" );
+ }
+
+ window = cvGetWindow(name);
+ if ( window == NULL )
+ {
+ CV_ERROR( CV_StsNullPtr, "NULL window" );
+ }
+
+ if ( [window autosize] )
+ {
+ return;
+ }
+
+ localpool = [[NSAutoreleasePool alloc] init];
+
+ fullscreenOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:NSFullScreenModeSetting];
+ if ( [[window contentView] isInFullScreenMode] && prop_value==CV_WINDOW_NORMAL )
+ {
+ [[window contentView] exitFullScreenModeWithOptions:fullscreenOptions];
+ window.status=CV_WINDOW_NORMAL;
+ }
+ else if( ![[window contentView] isInFullScreenMode] && prop_value==CV_WINDOW_FULLSCREEN )
+ {
+ [[window contentView] enterFullScreenMode:[NSScreen mainScreen] withOptions:fullscreenOptions];
+ window.status=CV_WINDOW_FULLSCREEN;
+ }
+
+ [localpool drain];
+
+ __END__;
+}
+
@implementation CVWindow
@synthesize mouseCallback;
@synthesize autosize;
@synthesize firstContent;
@synthesize sliders;
+@synthesize status;
- (void)cvSendMouseEvent:(NSEvent *)event type:(int)type flags:(int)flags {
//cout << "cvSendMouseEvent" << endl;
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
CVWindow *cvwindow = (CVWindow *)[self window];
int height = 0;
- for(NSString *key in [cvwindow sliders]) {
- height += [[[cvwindow sliders] valueForKey:key] frame].size.height;
- }
+ if ([cvwindow respondsToSelector:@selector(sliders)]) {
+ for(NSString *key in [cvwindow sliders]) {
+ height += [[[cvwindow sliders] valueForKey:key] frame].size.height;
+ }
+ }
NSRect imageRect = {{0,0}, {[image size].width, [image size].height}};
add_library(${the_module} SHARED ${handwrittren_h_sources} ${handwrittren_cpp_sources} ${generated_cpp_sources})
if(BUILD_FAT_JAVA_LIB)
set(__deps ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_MODULES_BUILD})
- list(REMOVE_ITEM __deps ${the_module})
+ list(REMOVE_ITEM __deps ${the_module} opencv_ts)
ocv_list_unique(__deps)
set(__extradeps ${__deps})
ocv_list_filterout(__extradeps "^opencv_")
if(__extradeps)
list(REMOVE_ITEM __deps ${__extradeps})
endif()
-
target_link_libraries(${the_module} -Wl,-whole-archive ${__deps} -Wl,-no-whole-archive ${__extradeps} ${OPENCV_LINKER_LIBS})
else()
target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_LINKER_LIBS})
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
- <name>OpenCV-2.4.2</name>
+ <name>OpenCV Library-2.4.2</name>
<comment></comment>
<projects>
</projects>
else
cvWriteInt( fs, "splitting_criteria", params.split_criteria );
- cvWriteInt( fs, "ntrees", params.weak_count );
+ cvWriteInt( fs, "ntrees", weak->total );
cvWriteReal( fs, "weight_trimming_rate", params.weight_trim_rate );
data->write_params( fs );
namespace detail {\r
\r
void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features)\r
-{ \r
+{\r
find(image, features);\r
features.img_size = image.size();\r
}\r
\r
\r
void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, const vector<Rect> &rois)\r
-{ \r
+{\r
vector<ImageFeatures> roi_features(rois.size());\r
size_t total_kps_count = 0;\r
int total_descriptors_height = 0;\r
\r
features.img_size = image.size();\r
features.keypoints.resize(total_kps_count);\r
- features.descriptors.create(total_descriptors_height, \r
- roi_features[0].descriptors.cols, \r
+ features.descriptors.create(total_descriptors_height,\r
+ roi_features[0].descriptors.cols,\r
roi_features[0].descriptors.type());\r
\r
int kp_idx = 0;\r
{\r
detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");\r
extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");\r
- \r
+\r
if( detector_.empty() || extractor_.empty() )\r
CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );\r
- \r
+\r
detector_->set("hessianThreshold", hess_thresh);\r
detector_->set("nOctaves", num_octaves);\r
detector_->set("nOctaveLayers", num_layers);\r
- \r
+\r
extractor_->set("nOctaves", num_octaves_descr);\r
extractor_->set("nOctaveLayers", num_layers_descr);\r
}\r
int xr = (c+1) * gray_image.cols / grid_size.width;\r
int yr = (r+1) * gray_image.rows / grid_size.height;\r
\r
- LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "\r
- << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "\r
- << " yl=" << yl << ", yr=" << yr << ", "\r
- << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "\r
- << "gray_image.dims=" << gray_image.dims << "\n");\r
+ // LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "\r
+ // << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "\r
+ // << " yl=" << yl << ", yr=" << yr << ", "\r
+ // << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "\r
+ // << "gray_image.dims=" << gray_image.dims << "\n");\r
\r
Mat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));\r
- LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "\r
- << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "\r
- << " gray_image_part.dims=" << gray_image_part.dims << ", "\r
- << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");\r
+ // LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "\r
+ // << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "\r
+ // << " gray_image_part.dims=" << gray_image_part.dims << ", "\r
+ // << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");\r
\r
(*orb)(gray_image_part, Mat(), points, descriptors);\r
\r
if (matches_info.inliers_mask[i])\r
matches_info.num_inliers++;\r
\r
- // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching \r
+ // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching\r
// using Invariant Features"\r
matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());\r
\r
- // Set zero confidence to remove matches between too close images, as they don't provide \r
+ // Set zero confidence to remove matches between too close images, as they don't provide\r
// additional information anyway. The threshold was set experimentally.\r
matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;\r
\r
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/internal.hpp"
+#include <list>
+
#ifdef HAVE_TEGRA_OPTIMIZATION
#include "opencv2/video/video_tegra.hpp"
#endif
-<?xml version="1.0" encoding="UTF-8"?>\r
-<projectDescription>\r
- <name>Sample - 15-puzzle</name>\r
- <comment></comment>\r
- <projects>\r
- </projects>\r
- <buildSpec>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>org.eclipse.jdt.core.javabuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.ApkBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- </buildSpec>\r
- <natures>\r
- <nature>com.android.ide.eclipse.adt.AndroidNature</nature>\r
- <nature>org.eclipse.jdt.core.javanature</nature>\r
- </natures>\r
-</projectDescription>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>OpenCV Sample - 15-puzzle</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ApkBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
<?xml version="1.0" encoding="utf-8"?>
<resources>
- <string name="app_name">OpenCV Sample - 15-puzzle</string>
+ <string name="app_name">OCV 15 Puzzle</string>
</resources>
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
- <name>ColorBlobDetection</name>
+ <name>OpenCV Sample - color-blob-detection</name>
<comment></comment>
<projects>
</projects>
<uses-sdk android:minSdkVersion="8" />
<application
- android:icon="@drawable/ic_launcher"
+ android:icon="@drawable/icon"
android:label="@string/app_name" >
<activity
android:name="org.opencv.samples.colorblobdetect.ColorBlobDetectionActivity"
+++ /dev/null
-<?xml version="1.0" encoding="utf-8"?>
-<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
- android:layout_width="fill_parent"
- android:layout_height="fill_parent"
- android:orientation="vertical" >
-
- <TextView
- android:layout_width="fill_parent"
- android:layout_height="wrap_content"
- android:text="@string/hello" />
-
-</LinearLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<resources>
-
- <string name="hello">Hello World, ColorBlobDetectionActivity!</string>
- <string name="app_name">ColorBlobDetection</string>
-
+ <string name="app_name">OCV Color Blob Detection</string>
</resources>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Sample - face-detection</name>\r
+ <name>OpenCV Sample - face-detection</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">OpenCV Sample - face-detection</string>\r
+ <string name="app_name">OCV Face Detection</string>\r
</resources>\r
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Sample - image-manipulations</name>\r
+ <name>OpenCV Sample - image-manipulations</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">OpenCV Sample - image-manipulations</string>\r
+ <string name="app_name">OCV Image Manipulations</string>\r
</resources>\r
-<?xml version="1.0" encoding="UTF-8"?>\r
-<projectDescription>\r
- <name>Tutorial 0 (Basic) - Android Camera</name>\r
- <comment></comment>\r
- <projects>\r
- </projects>\r
- <buildSpec>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>org.eclipse.jdt.core.javabuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- <buildCommand>\r
- <name>com.android.ide.eclipse.adt.ApkBuilder</name>\r
- <arguments>\r
- </arguments>\r
- </buildCommand>\r
- </buildSpec>\r
- <natures>\r
- <nature>com.android.ide.eclipse.adt.AndroidNature</nature>\r
- <nature>org.eclipse.jdt.core.javanature</nature>\r
- </natures>\r
-</projectDescription>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>OpenCV Tutorial 0 - Android Camera</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ApkBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">Tutorial 0 (Basic) - Android Camera</string>\r
+ <string name="app_name">OCV T0 Android Camera</string>\r
</resources>\r
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Tutorial 1 (Basic) - Add OpenCV</name>\r
+ <name>OpenCV Tutorial 1 - Add OpenCV</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">Tutorial 1 (Basic) - Add OpenCV</string>\r
+ <string name="app_name">OCV T1 Add OpenCV</string>\r
</resources>\r
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Tutorial 2 (Basic) - Use OpenCV Camera</name>\r
+ <name>OpenCV Tutorial 2 - Use OpenCV Camera</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">Tutorial 2 (Basic) - Use OpenCV Camera</string>\r
+ <string name="app_name">OCV T2 Use OpenCV Camera</string>\r
</resources>\r
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Tutorial 3 (Advanced) - Add Native OpenCV</name>\r
+ <name>OpenCV Tutorial 3 - Add Native OpenCV</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">Tutorial 3 (Advanced) - Add Native OpenCV</string>\r
+ <string name="app_name">OCV T3 Add Native OpenCV</string>\r
</resources>\r
<?xml version="1.0" encoding="UTF-8"?>\r
<projectDescription>\r
- <name>Tutorial 4 (Advanced) - Mix Java+Native OpenCV</name>\r
+ <name>OpenCV Tutorial 4 - Mix Java+Native OpenCV</name>\r
<comment></comment>\r
<projects>\r
</projects>\r
<?xml version="1.0" encoding="utf-8"?>\r
<resources>\r
- <string name="app_name">Tutorial 4 (Advanced) - Mix Java+Native OpenCV</string>\r
+ <string name="app_name">OCV T4 Mix Java+Native OpenCV</string>\r
</resources>\r
// It shouldn't matter which object class is specified here - visual vocab will still be the same.
int vocabSize; //number of visual words in vocabulary to train
int memoryUse; // Memory to preallocate (in MB) when training vocab.
- // Change this depending on the size of the dataset/available memory.
+ // Change this depending on the size of the dataset/available memory.
float descProportion; // Specifies the number of descriptors to use from each image as a proportion of the total num descs.
};
if( !readVocabulary( filename, vocabulary) )
{
CV_Assert( dextractor->descriptorType() == CV_32FC1 );
- const int descByteSize = dextractor->descriptorSize()*4;
- const int maxDescCount = (trainParams.memoryUse * 1048576) / descByteSize; // Total number of descs to use for training.
+ const int elemSize = CV_ELEM_SIZE(dextractor->descriptorType());
+ const int descByteSize = dextractor->descriptorSize() * elemSize;
+ const int bytesInMB = 1048576;
+ const int maxDescCount = (trainParams.memoryUse * bytesInMB) / descByteSize; // Total number of descs to use for training.
cout << "Extracting VOC data..." << endl;
vector<ObdImage> images;
while( images.size() > 0 )
{
- if( bowTrainer.descripotorsCount() >= maxDescCount )
+ if( bowTrainer.descripotorsCount() > maxDescCount )
{
- assert( bowTrainer.descripotorsCount() == maxDescCount );
#ifdef DEBUG_DESC_PROGRESS
cout << "Breaking due to full memory ( descriptors count = " << bowTrainer.descripotorsCount()
<< "; descriptor size in bytes = " << descByteSize << "; all used memory = "
\r
SZ = 20 # size of each digit is SZ x SZ\r
CLASS_N = 10\r
+DIGITS_FN = 'digits.png'\r
\r
def load_digits(fn):\r
print 'loading "%s" ...' % fn\r
if __name__ == '__main__':\r
print __doc__\r
\r
- digits, labels = load_digits('digits.png')\r
+ digits, labels = load_digits(DIGITS_FN)\r
\r
print 'preprocessing...'\r
# shuffle digits\r
digits_adjust.py [--model {svm|knearest}] [--cloud] [--env <PiCloud environment>]\r
\r
--model {svm|knearest} - select the classifier (SVM is the default)\r
- --cloud - use PiCloud computing platform (for SVM only)\r
+ --cloud - use PiCloud computing platform\r
--env - cloud environment name\r
\r
'''\r
-# TODO dataset preprocessing in cloud\r
# TODO cloud env setup tutorial\r
\r
import numpy as np\r
\r
from digits import *\r
\r
+try: \r
+ import cloud\r
+ have_cloud = True\r
+except ImportError:\r
+ have_cloud = False\r
+ \r
+\r
+\r
def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):\r
n = len(samples)\r
folds = np.array_split(np.arange(n), kfold)\r
scores = pool.map(f, xrange(kfold))\r
return np.mean(scores)\r
\r
-def adjust_KNearest(samples, labels):\r
- print 'adjusting KNearest ...'\r
- best_err, best_k = np.inf, -1\r
- for k in xrange(1, 9):\r
- err = cross_validate(KNearest, dict(k=k), samples, labels)\r
- if err < best_err:\r
- best_err, best_k = err, k\r
- print 'k = %d, error: %.2f %%' % (k, err*100)\r
- best_params = dict(k=best_k)\r
- print 'best params:', best_params\r
- return best_params\r
-\r
-def adjust_SVM(samples, labels, usecloud=False, cloud_env=''):\r
- Cs = np.logspace(0, 5, 10, base=2)\r
- gammas = np.logspace(-7, -2, 10, base=2)\r
- scores = np.zeros((len(Cs), len(gammas)))\r
- scores[:] = np.nan\r
-\r
- if usecloud:\r
- try: \r
- import cloud\r
- except ImportError: \r
- print 'cloud module is not installed'\r
+\r
+class App(object):\r
+ def __init__(self, usecloud=False, cloud_env=''):\r
+ if usecloud and not have_cloud:\r
+ print 'warning: cloud module is not installed, running locally'\r
usecloud = False\r
- if usecloud:\r
- print 'uploading dataset to cloud...'\r
- np.savez('train.npz', samples=samples, labels=labels)\r
- cloud.files.put('train.npz')\r
-\r
- print 'adjusting SVM (may take a long time) ...'\r
- def f(job):\r
- i, j = job\r
- params = dict(C = Cs[i], gamma=gammas[j])\r
- score = cross_validate(SVM, params, samples, labels)\r
- return i, j, score\r
- def fcloud(job):\r
- i, j = job\r
- cloud.files.get('train.npz')\r
- npz = np.load('train.npz')\r
- params = dict(C = Cs[i], gamma=gammas[j])\r
- score = cross_validate(SVM, params, npz['samples'], npz['labels'])\r
- return i, j, score\r
- \r
- if usecloud:\r
- jids = cloud.map(fcloud, np.ndindex(*scores.shape), _env=cloud_env, _profile=True)\r
- ires = cloud.iresult(jids)\r
- else:\r
- pool = ThreadPool(processes=cv2.getNumberOfCPUs())\r
- ires = pool.imap_unordered(f, np.ndindex(*scores.shape))\r
+ self.usecloud = usecloud\r
+ self.cloud_env = cloud_env\r
+\r
+ if self.usecloud:\r
+ print 'uploading dataset to cloud...'\r
+ cloud.files.put(DIGITS_FN)\r
+ self.preprocess_job = cloud.call(self.preprocess, _env=self.cloud_env)\r
+ else:\r
+ self._samples, self._labels = self.preprocess()\r
\r
- for count, (i, j, score) in enumerate(ires):\r
- scores[i, j] = score\r
- print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)\r
- print scores\r
+ def preprocess(self):\r
+ if self.usecloud:\r
+ cloud.files.get(DIGITS_FN)\r
+ digits, labels = load_digits(DIGITS_FN)\r
+ shuffle = np.random.permutation(len(digits))\r
+ digits, labels = digits[shuffle], labels[shuffle]\r
+ digits2 = map(deskew, digits)\r
+ samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0\r
+ return samples, labels\r
+\r
+ def get_dataset(self):\r
+ if self.usecloud:\r
+ return cloud.result(self.preprocess_job)\r
+ else:\r
+ return self._samples, self._labels\r
+\r
+ def run_jobs(self, f, jobs):\r
+ if self.usecloud:\r
+ jids = cloud.map(f, jobs, _env=self.cloud_env, _profile=True, _depends_on=self.preprocess_job)\r
+ ires = cloud.iresult(jids)\r
+ else:\r
+ pool = ThreadPool(processes=cv2.getNumberOfCPUs())\r
+ ires = pool.imap_unordered(f, jobs)\r
+ return ires\r
+ \r
+ def adjust_SVM(self):\r
+ Cs = np.logspace(0, 5, 10, base=2)\r
+ gammas = np.logspace(-7, -2, 10, base=2)\r
+ scores = np.zeros((len(Cs), len(gammas)))\r
+ scores[:] = np.nan\r
+\r
+ print 'adjusting SVM (may take a long time) ...'\r
+ def f(job):\r
+ i, j = job\r
+ samples, labels = self.get_dataset()\r
+ params = dict(C = Cs[i], gamma=gammas[j])\r
+ score = cross_validate(SVM, params, samples, labels)\r
+ return i, j, score\r
+ \r
+ ires = self.run_jobs(f, np.ndindex(*scores.shape))\r
+ for count, (i, j, score) in enumerate(ires):\r
+ scores[i, j] = score\r
+ print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)\r
+ print scores\r
+\r
+ i, j = np.unravel_index(scores.argmin(), scores.shape)\r
+ best_params = dict(C = Cs[i], gamma=gammas[j])\r
+ print 'best params:', best_params\r
+ print 'best error: %.2f %%' % (scores.min()*100)\r
+ return best_params\r
+\r
+ def adjust_KNearest(self):\r
+ print 'adjusting KNearest ...'\r
+ def f(k):\r
+ samples, labels = self.get_dataset()\r
+ err = cross_validate(KNearest, dict(k=k), samples, labels)\r
+ return k, err\r
+ best_err, best_k = np.inf, -1\r
+ for k, err in self.run_jobs(f, xrange(1, 9)):\r
+ if err < best_err:\r
+ best_err, best_k = err, k\r
+ print 'k = %d, error: %.2f %%' % (k, err*100)\r
+ best_params = dict(k=best_k)\r
+ print 'best params:', best_params, 'err: %.2f' % (best_err*100)\r
+ return best_params\r
\r
- i, j = np.unravel_index(scores.argmin(), scores.shape)\r
- best_params = dict(C = Cs[i], gamma=gammas[j])\r
- print 'best params:', best_params\r
- print 'best error: %.2f %%' % (scores.min()*100)\r
- return best_params\r
\r
if __name__ == '__main__':\r
import getopt\r
\r
print __doc__\r
\r
+\r
args, _ = getopt.getopt(sys.argv[1:], '', ['model=', 'cloud', 'env='])\r
args = dict(args)\r
args.setdefault('--model', 'svm')\r
print 'unknown model "%s"' % args['--model']\r
sys.exit(1)\r
\r
- digits, labels = load_digits('digits.png')\r
- shuffle = np.random.permutation(len(digits))\r
- digits, labels = digits[shuffle], labels[shuffle]\r
- digits2 = map(deskew, digits)\r
- samples = np.float32(digits2).reshape(-1, SZ*SZ) / 255.0\r
- \r
t = clock()\r
+ app = App(usecloud='--cloud' in args, cloud_env = args['--env'])\r
if args['--model'] == 'knearest':\r
- adjust_KNearest(samples, labels)\r
+ app.adjust_KNearest()\r
else:\r
- adjust_SVM(samples, labels, usecloud='--cloud' in args, cloud_env = args['--env'])\r
+ app.adjust_SVM()\r
print 'work time: %f s' % (clock() - t)\r
-
\ No newline at end of file
--- /dev/null
+'''\r
+Robust line fitting.\r
+====================\r
+\r
+Example of using cv2.fitLine function for fitting line to points in presence of outliers.\r
+\r
+Usage\r
+-----\r
+fitline.py\r
+\r
+Switch through different M-estimator functions and see how well the robust functions\r
+fit the line even in case of ~50% of outliers.\r
+\r
+Keys\r
+----\r
+SPACE - generate random points\r
+f - change distance function\r
+ESC - exit\r
+'''\r
+\r
+import numpy as np\r
+import cv2\r
+import itertools as it\r
+from common import draw_str\r
+\r
+\r
+w, h = 512, 256\r
+\r
+def toint(p):\r
+ return tuple(map(int, p))\r
+\r
+def sample_line(p1, p2, n, noise=0.0):\r
+ p1 = np.float32(p1)\r
+ t = np.random.rand(n,1)\r
+ return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise\r
+\r
+dist_func_names = it.cycle('CV_DIST_L2 CV_DIST_L1 CV_DIST_L12 CV_DIST_FAIR CV_DIST_WELSCH CV_DIST_HUBER'.split())\r
+cur_func_name = dist_func_names.next()\r
+\r
+def update(_=None):\r
+ noise = cv2.getTrackbarPos('noise', 'fit line')\r
+ n = cv2.getTrackbarPos('point n', 'fit line')\r
+ r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0\r
+ outn = int(n*r)\r
+\r
+ p0, p1 = (90, 80), (w-90, h-80)\r
+ img = np.zeros((h, w, 3), np.uint8)\r
+ cv2.line(img, toint(p0), toint(p1), (0, 255, 0))\r
+\r
+ if n > 0:\r
+ line_points = sample_line(p0, p1, n-outn, noise)\r
+ outliers = np.random.rand(outn, 2) * (w, h)\r
+ points = np.vstack([line_points, outliers])\r
+ for p in line_points:\r
+ cv2.circle(img, toint(p), 2, (255, 255, 255), -1)\r
+ for p in outliers:\r
+ cv2.circle(img, toint(p), 2, (64, 64, 255), -1)\r
+ func = getattr(cv2.cv, cur_func_name)\r
+ vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)\r
+ cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))\r
+\r
+ draw_str(img, (20, 20), cur_func_name)\r
+ cv2.imshow('fit line', img)\r
+\r
+if __name__ == '__main__':\r
+ print __doc__\r
+\r
+ cv2.namedWindow('fit line')\r
+ cv2.createTrackbar('noise', 'fit line', 3, 50, update)\r
+ cv2.createTrackbar('point n', 'fit line', 100, 500, update)\r
+ cv2.createTrackbar('outlier %', 'fit line', 30, 100, update)\r
+ while True:\r
+ update()\r
+ ch = cv2.waitKey(0)\r
+ if ch == ord('f'):\r
+ cur_func_name = dist_func_names.next()\r
+ if ch == 27:\r
+ break\r
+'''\r
+Video capture sample.\r
+\r
+Sample shows how VideoCapture class can be used to acquire video\r
+frames from a camera or a movie file. Also the sample provides \r
+an example of procedural video generation by an object, mimicking \r
+the VideoCapture interface (see Chess class). \r
+\r
+'create_capture' is a convenience function for capture creation, \r
+falling back to procedural video in case of error.\r
+\r
+Usage:\r
+    video.py [--shotdir <shot path>] [source0] [source1] ...\r
+\r
+ sourceN is an\r
+ - integer number for camera capture\r
+ - name of video file\r
+ - synth:<params> for procedural video\r
+\r
+Synth examples:\r
+ synth:bg=../cpp/lena.jpg:noise=0.1\r
+ synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480\r
+\r
+Keys:\r
+ ESC - exit\r
+ SPACE - save current frame to <shot path> directory\r
+\r
+'''\r
+\r
import numpy as np\r
import cv2\r
from time import clock\r
\r
\r
def create_capture(source = 0, fallback = presets['chess']):\r
- '''\r
- source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'\r
+ '''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'\r
'''\r
source = str(source).strip()\r
chunks = source.split(':')\r
import sys\r
import getopt\r
\r
- print 'USAGE: video.py [--shotdir <dir>] [source0] [source1] ...'\r
- print "source: '<int>' or '<filename>' or 'synth:<params>'"\r
- print\r
+ print __doc__\r
\r
args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')\r
args = dict(args)\r
if len(sources) == 0:\r
sources = [ 0 ]\r
\r
- print 'Press SPACE to save current frame'\r
-\r
caps = map(create_capture, sources)\r
shot_idx = 0\r
while True:\r
-import numpy as np\r
-import cv2\r
-from common import Sketcher\r
+'''\r
+Watershed segmentation\r
+======================\r
+\r
+This program demonstrates the watershed segmentation algorithm \r
+in OpenCV: watershed().\r
\r
-help_message = '''\r
- USAGE: watershed.py [<image>]\r
+Usage\r
+-----\r
+watershed.py [image filename]\r
\r
- Use keys 1 - 7 to switch marker color\r
+Keys\r
+----\r
+ 1-7 - switch marker color\r
SPACE - update segmentation\r
r - reset\r
- a - switch autoupdate\r
+ a - toggle autoupdate\r
ESC - exit\r
\r
'''\r
\r
+\r
+\r
+\r
+import numpy as np\r
+import cv2\r
+from common import Sketcher\r
+\r
class App:\r
def __init__(self, fn):\r
self.img = cv2.imread(fn)\r
import sys\r
try: fn = sys.argv[1]\r
except: fn = '../cpp/fruits.jpg'\r
- print help_message\r
+ print __doc__\r
App(fn).run()\r