# ===================================================
OCV_OPTION(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" NOT (ANDROID OR APPLE_FRAMEWORK) )
OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID AND NOT WINRT) IF (NOT APPLE_FRAMEWORK) )
+OCV_OPTION(BUILD_opencv_js "Build JavaScript bindings via Emscripten" OFF )
OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" ON IF ANDROID )
OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON IF (NOT WINRT OR APPLE_FRAMEWORK))
OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF )
if(BUILD_DOCS AND DOXYGEN_FOUND)
# not documented modules list
- list(APPEND blacklist "ts" "java" "python2" "python3" "world" "contrib_world")
+ list(APPEND blacklist "ts" "java" "python2" "python3" "js" "world" "contrib_world")
unset(CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT)
+ unset(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT)
# gathering headers
set(paths_include)
set(faqfile "${CMAKE_CURRENT_SOURCE_DIR}/faq.markdown")
set(tutorial_path "${CMAKE_CURRENT_SOURCE_DIR}/tutorials")
set(tutorial_py_path "${CMAKE_CURRENT_SOURCE_DIR}/py_tutorials")
+ set(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT "- @ref tutorial_js_root")
+ set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials")
set(example_path "${CMAKE_SOURCE_DIR}/samples")
# set export variables
- string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
- string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${paths_tutorial}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial}")
# TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${example_path} ; ${paths_doc} ; ${paths_sample}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
configure_file(Doxyfile.in ${doxyfile} @ONLY)
configure_file(root.markdown.in ${rootfile} @ONLY)
+ # js tutorial assets
+ set(opencv_tutorial_html_dir "${CMAKE_CURRENT_BINARY_DIR}/doxygen/html")
+ set(js_tutorials_assets_dir "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials/js_assets")
+ set(js_tutorials_assets_deps "")
+
+ # make sure the build directory exists
+ file(MAKE_DIRECTORY "${opencv_tutorial_html_dir}")
+
+ # gather and copy specific files for js tutorials
+ file(GLOB_RECURSE js_assets "${js_tutorials_assets_dir}/*")
+ ocv_list_filterout(js_assets "\\\\.eslintrc.json")
+ list(APPEND js_assets "${OpenCV_SOURCE_DIR}/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.mp4")
+
+ if(BUILD_opencv_js)
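+ # the js module emits opencv.js into the build bin directory; ship it next to the tutorial pages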
+ set(ocv_js_dir "${CMAKE_BINARY_DIR}/bin")
+ set(ocv_js "opencv.js")
+ list(APPEND js_assets "${ocv_js_dir}/${ocv_js}")
+ endif()
+
+ # copy haar cascade files needed by the face detection tutorial
+ set(data_haarcascades_path "${OpenCV_SOURCE_DIR}/data/haarcascades")
+ list(APPEND js_assets "${data_haarcascades_path}/haarcascade_frontalface_default.xml" "${data_haarcascades_path}/haarcascade_eye.xml")
+
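+ # copy each asset into the generated html directory and register both source and copy as dependencies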
+ foreach(f ${js_assets})
+ get_filename_component(fname "${f}" NAME)
+ add_custom_command(OUTPUT "${opencv_tutorial_html_dir}/${fname}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${f}" "${opencv_tutorial_html_dir}/${fname}"
+ DEPENDS "${f}"
+ COMMENT "Copying ${fname}"
+ )
+ list(APPEND js_tutorials_assets_deps "${f}" "${opencv_tutorial_html_dir}/${fname}")
+ endforeach()
+
add_custom_target(doxygen
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
- DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps}
+ DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps} ${js_tutorials_assets_deps}
)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html
DESTINATION "${OPENCV_DOC_INSTALL_PATH}"
FORMULA_TRANSPARENT = YES
USE_MATHJAX = YES
MATHJAX_FORMAT = HTML-CSS
-MATHJAX_RELPATH = http://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0
+MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0
MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
MATHJAX_CODEFILE = @CMAKE_CURRENT_SOURCE_DIR@/mymath.js
SEARCHENGINE = YES
--- /dev/null
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "ObjectExpression": "first",
+ "CallExpression": {"arguments": "first"},
+ "SwitchCase": 1}],
+ "require-jsdoc": "off",
+ "new-cap": "off"
+ },
+ "plugins": ["html"],
+ "settings": {
+ "html/javascript-mime-types": ["text/javascript", "text/code-snippet"],
+ "html/indent": "0",
+ "html/report-bad-indent": "error"
+ }
+}
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Padding Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Padding Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can experiment with different parameters
+let s = new cv.Scalar(255, 0, 0, 255);
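+// pad 10 pixels on every side with a constant border filled with the scalar above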
+cv.copyMakeBorder(src, dst, 10, 10, 10, 10, cv.BORDER_CONSTANT, s);
+cv.imshow('canvasOutput', dst);
+src.delete();
+dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image ROI Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image ROI Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can experiment with different parameters
+let rect = new cv.Rect(100, 100, 200, 200);
+dst = src.roi(rect);
+cv.imshow('canvasOutput', dst);
+src.delete();
+dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Background Subtraction Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Background Subtraction Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the video.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
+ The code in the &lt;textarea&gt; is executed when the video starts.
+ You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width="320" height="240" muted loop></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width="320" height="240"></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
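+// MOG2 with history = 500 frames, varThreshold = 16, shadow detection enabled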
+let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ frame.delete(); fgmask.delete(); fgbg.delete();
+ return;
+ }
+ let begin = Date.now();
+ // start processing.
+ cap.read(frame);
+ fgbg.apply(frame, fgmask);
+ cv.imshow('canvasOutput', fgmask);
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+}
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+let canvasOutput = document.getElementById('canvasOutput');
+let canvasContext = canvasOutput.getContext('2d');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ videoInput.play().then(() => {
+ onVideoStarted();
+ });
+ } else {
+ videoInput.pause();
+ videoInput.currentTime = 0;
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
+ startAndStop.innerText = 'Start';
+}
+
+utils.loadOpenCv(() => {
+ videoInput.addEventListener('canplay', () => {
+ startAndStop.removeAttribute('disabled');
+ });
+ videoInput.src = 'box.mp4';
+});
+</script>
+</body>
+</html>
\ No newline at end of file
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>CamShift Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>CamShift Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the video.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as the CamShift input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as the CamShift output.<br>
+ The code in the &lt;textarea&gt; is executed when the video starts.
+ You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width="320" height="240" muted loop></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width="320" height="240"></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+
+// take the first frame of the video
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+cap.read(frame);
+
+// hardcode the initial location of the tracking window
+let trackWindow = new cv.Rect(150, 60, 63, 125);
+
+// set up the ROI for tracking
+let roi = frame.roi(trackWindow);
+let hsvRoi = new cv.Mat();
+cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
+cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
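+// mask off low-saturation and extreme-value pixels so they do not pollute the hue histogram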
+let mask = new cv.Mat();
+let lowScalar = new cv.Scalar(30, 30, 0);
+let highScalar = new cv.Scalar(180, 180, 180);
+let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
+let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
+cv.inRange(hsvRoi, low, high, mask);
+let roiHist = new cv.Mat();
+let hsvRoiVec = new cv.MatVector();
+hsvRoiVec.push_back(hsvRoi);
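+// 1-D hue histogram: channel 0, 180 bins over the range [0, 180)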
+cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
+cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
+
+// free the intermediate mats that are no longer needed.
+roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
+
+// set up the termination criteria: either 10 iterations or a move of at least 1 pt
+let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
+
+let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv);
+let dst = new cv.Mat();
+let trackBox = null;
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
+ return;
+ }
+ let begin = Date.now();
+
+ // start processing.
+ cap.read(frame);
+ cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
+ cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
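+ // back-project the ROI hue histogram to get a probability map for CamShift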
+ cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
+
+ // apply camshift to get the new location
+ [trackBox, trackWindow] = cv.CamShift(dst, trackWindow, termCrit);
+
+ // Draw it on image
+ let pts = cv.rotatedRectPoints(trackBox);
+ cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
+ cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
+ cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
+ cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
+ cv.imshow('canvasOutput', frame);
+
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+}
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+let canvasOutput = document.getElementById('canvasOutput');
+let canvasContext = canvasOutput.getContext('2d');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ videoInput.play().then(() => {
+ onVideoStarted();
+ });
+ } else {
+ videoInput.pause();
+ videoInput.currentTime = 0;
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
+ startAndStop.innerText = 'Start';
+}
+
+utils.loadOpenCv(() => {
+ videoInput.addEventListener('canplay', () => {
+ startAndStop.removeAttribute('disabled');
+ });
+ videoInput.src = 'cup.mp4';
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Canny Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Canny Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+// You can experiment with different parameters
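+// hysteresis thresholds 50 and 100, aperture size 3, L2gradient disabled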
+cv.Canny(src, dst, 50, 100, 3, false);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Convert Color Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Convert Color Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can experiment with different parameters
+cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY, 0);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image InRange Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image InRange Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
+let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
+// You can experiment with different parameters
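+// pixels with every channel inside [low, high] become 255 in dst, all others 0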
+cv.inRange(src, low, high, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); low.delete(); high.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image ApproxPolyDP Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image ApproxPolyDP Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+let poly = new cv.MatVector();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+// approximate each contour with a polygon
+for (let i = 0; i < contours.size(); ++i) {
+ let tmp = new cv.Mat();
+ let cnt = contours.get(i);
+ // You can experiment with different parameters
+ cv.approxPolyDP(cnt, tmp, 3, true);
+ poly.push_back(tmp);
+ cnt.delete(); tmp.delete();
+}
+// draw each contour with a random color
+for (let i = 0; i < contours.size(); ++i) {
+ let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
+ Math.round(Math.random() * 255));
+ cv.drawContours(dst, poly, i, color, 1, 8, hierarchy, 0);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); poly.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Area Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Area Example</h2>
+<p>
+ A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <p><strong>The area is: </strong><span id="areaOutput"></span></p>
+ </div>
+
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(20);
+// You can experiment with different parameters
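+// the second argument toggles signed (oriented) area; false returns the absolute area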
+let area = cv.contourArea(cnt, false);
+areaOutput.innerHTML = area;
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Bounding Rect Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Bounding Rect Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+// You can experiment with different parameters
+let rect = cv.boundingRect(cnt);
+let contoursColor = new cv.Scalar(255, 255, 255);
+let rectangleColor = new cv.Scalar(255, 0, 0);
+cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
+let point1 = new cv.Point(rect.x, rect.y);
+let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
+cv.rectangle(dst, point1, point2, rectangleColor, 2, cv.LINE_AA, 0);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Convex Hull Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Convex Hull Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+let hull = new cv.MatVector();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+// compute the convex hull of each contour
+for (let i = 0; i < contours.size(); ++i) {
+ let tmp = new cv.Mat();
+ let cnt = contours.get(i);
+ // You can experiment with different parameters
+ cv.convexHull(cnt, tmp, false, true);
+ hull.push_back(tmp);
+ cnt.delete(); tmp.delete();
+}
+// draw each hull with a random color
+for (let i = 0; i < contours.size(); ++i) {
+ let colorHull = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
+ Math.round(Math.random() * 255));
+ cv.drawContours(dst, hull, i, colorHull, 1, 8, hierarchy, 0);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Fit Ellipse Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Fit Ellipse Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+// You can experiment with different parameters
+let rotatedRect = cv.fitEllipse(cnt);
+let contoursColor = new cv.Scalar(255, 255, 255);
+let ellipseColor = new cv.Scalar(255, 0, 0);
+cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
+cv.ellipse1(dst, rotatedRect, ellipseColor, 1, cv.LINE_8);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Fit Line Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Fit Line Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+let line = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+// You can experiment with different parameters
+cv.fitLine(cnt, line, cv.DIST_L2, 0, 0.01, 0.01);
+let contoursColor = new cv.Scalar(255, 255, 255);
+let lineColor = new cv.Scalar(255, 0, 0);
+cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
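+// fitLine output is [vx, vy, x0, y0]: a unit direction vector and a point on the line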
+let vx = line.data32F[0];
+let vy = line.data32F[1];
+let x = line.data32F[2];
+let y = line.data32F[3];
+let lefty = Math.round((-x * vy / vx) + y);
+let righty = Math.round(((src.cols - x) * vy / vx) + y);
+let point1 = new cv.Point(src.cols - 1, righty);
+let point2 = new cv.Point(0, lefty);
+cv.line(dst, point1, point2, lineColor, 2, cv.LINE_AA, 0);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); line.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Min Area Rect Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Min Area Rect Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+// You can experiment with different parameters
+let rotatedRect = cv.minAreaRect(cnt);
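+// RotatedRect.points() returns the four corner points of the rotated rectangle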
+let vertices = cv.RotatedRect.points(rotatedRect);
+let contoursColor = new cv.Scalar(255, 255, 255);
+let rectangleColor = new cv.Scalar(255, 0, 0);
+cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
+// draw rotatedRect
+for (let i = 0; i < 4; i++) {
+ cv.line(dst, vertices[i], vertices[(i + 1) % 4], rectangleColor, 2, cv.LINE_AA, 0);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Min Enclosing Circle Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Min Enclosing Circle Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+// You can experiment with different parameters
+let circle = cv.minEnclosingCircle(cnt);
+let contoursColor = new cv.Scalar(255, 255, 255);
+let circleColor = new cv.Scalar(255, 0, 0);
+cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
+cv.circle(dst, circle.center, circle.radius, circleColor);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Moments Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Moments Example</h2>
+<p>
+ A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <p><strong>The m00 is: </strong><span id="momentsOutput"></span></p>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
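+// m00 is the zeroth moment; for a contour it equals the enclosed area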
+// You can experiment with different parameters
+let moments = cv.moments(cnt, false);
+momentsOutput.innerHTML = moments.m00;
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Perimeter Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Perimeter Example</h2>
+<p>
+ A &lt;canvas&gt; element named <b>canvasInput</b> has been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <p><strong>The perimeter is: </strong><span id="perimeterOutput"></span></p>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(20);
+// You can experiment with different parameters
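+// true treats the contour as a closed curve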
+let perimeter = cv.arcLength(cnt, true);
+perimeterOutput.innerHTML = perimeter;
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Transpose Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Transpose Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
+cv.transpose(src, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Contours Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Contours Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+// You can experiment with different parameters
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+// draw each contour with a random color
+for (let i = 0; i < contours.size(); ++i) {
+ let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
+ Math.round(Math.random() * 255));
+ cv.drawContours(dst, contours, i, color, 1, cv.LINE_8, hierarchy, 100);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Convexity Defects Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Convexity Defects Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let hull = new cv.Mat();
+let defect = new cv.Mat();
+let cnt = contours.get(0);
+let lineColor = new cv.Scalar(255, 0, 0);
+let circleColor = new cv.Scalar(255, 255, 255);
+cv.convexHull(cnt, hull, false, false);
+cv.convexityDefects(cnt, hull, defect);
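+// each defect row stores [startIdx, endIdx, farthestPtIdx, depth]; the indices refer to points in cnt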
+for (let i = 0; i < defect.rows; ++i) {
+ let start = new cv.Point(cnt.data32S[defect.data32S[i * 4] * 2],
+ cnt.data32S[defect.data32S[i * 4] * 2 + 1]);
+ let end = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 1] * 2],
+ cnt.data32S[defect.data32S[i * 4 + 1] * 2 + 1]);
+ let far = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 2] * 2],
+ cnt.data32S[defect.data32S[i * 4 + 2] * 2 + 1]);
+ cv.line(dst, start, end, lineColor, 2, cv.LINE_AA, 0);
+ cv.circle(dst, far, 3, circleColor, -1);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete(); defect.delete(); cnt.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Match Shape Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Match Shape Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+ <div class="inputoutput">
+ <p><strong>The result is: </strong><span id="matchShapesOutput"></span></p>
+ </div>
+
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let contourID0 = 10;
+let contourID1 = 5;
+let color0 = new cv.Scalar(255, 0, 0);
+let color1 = new cv.Scalar(0, 0, 255);
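+// method 1 (CV_CONTOURS_MATCH_I1) compares the Hu moments of the two contours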
+// You can experiment with different parameters
+let cnt0 = contours.get(contourID0);
+let cnt1 = contours.get(contourID1);
+let result = cv.matchShapes(cnt0, cnt1, 1, 0);
+matchShapesOutput.innerHTML = result;
+cv.drawContours(dst, contours, contourID0, color0, 1, cv.LINE_8, hierarchy, 100);
+cv.drawContours(dst, contours, contourID1, color1, 1, cv.LINE_8, hierarchy, 100);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt0.delete(); cnt1.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+body, div, p {
+ font: 400 14px/22px Roboto,sans-serif;
+}
+canvas, img, video {
+ border: 1px solid black;
+}
+td {
+ padding: 10px 0px 0px 10px;
+ text-align: center;
+}
+button {
+ display: inline-block;
+ color: #fff;
+ background-color: #337ab7;
+ border-color: #2e6da4;
+ padding: 6px 12px;
+ margin-bottom: 0;
+ font-size: 14px;
+ font-weight: bold;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: middle;
+ -ms-touch-action: manipulation;
+ touch-action: manipulation;
+ cursor: pointer;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ background-image: none;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+button[disabled] {
+ cursor: not-allowed;
+ filter: alpha(opacity=65);
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ opacity: .65;
+}
+.control {
+ margin-bottom: 3px;
+}
+.err {
+ color: red;
+ font-weight: bold;
+}
+.inputoutput {
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+.caption {
+ margin: 0;
+ font-weight: bold;
+}
+.code {
+ padding: 4px 6px 0px 6px;
+ margin: 0px;
+ background-color: #FBFCFD;
+ border: 1px solid #C4CFE5;
+ font-family: monospace, fixed;
+ font-size: 13px;
+ min-height: 13px;
+ line-height: 1.0;
+ text-wrap: unrestricted;
+}
+.hidden {
+ display: none;
+}
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Face Detection Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Face Detection Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+let faces = new cv.RectVector();
+let eyes = new cv.RectVector();
+let faceCascade = new cv.CascadeClassifier();
+let eyeCascade = new cv.CascadeClassifier();
+// load pre-trained classifiers
+faceCascade.load('haarcascade_frontalface_default.xml');
+eyeCascade.load('haarcascade_eye.xml');
+// detect faces
+let msize = new cv.Size(0, 0);
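+// a zero min/max size places no bound on the size of the detected faces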
+faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
+for (let i = 0; i < faces.size(); ++i) {
+ let roiGray = gray.roi(faces.get(i));
+ let roiSrc = src.roi(faces.get(i));
+ let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
+ let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
+ faces.get(i).y + faces.get(i).height);
+ cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
+ // detect eyes in face ROI
+ eyeCascade.detectMultiScale(roiGray, eyes);
+ for (let j = 0; j < eyes.size(); ++j) {
+ let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
+ let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
+ eyes.get(j).y + eyes.get(j).height);
+ cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
+ }
+ roiGray.delete(); roiSrc.delete();
+}
+cv.imshow('canvasOutput', src);
+src.delete(); gray.delete(); faceCascade.delete();
+eyeCascade.delete(); faces.delete(); eyes.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+let Module = {
+ preRun: [function() {
+ Module.FS_createPreloadedFile('/', 'haarcascade_eye.xml',
+ 'haarcascade_eye.xml', true, false);
+ Module.FS_createPreloadedFile('/', 'haarcascade_frontalface_default.xml',
+ 'haarcascade_frontalface_default.xml', true, false);
+ }],
+};
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Face Detection Camera Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Face Detection Camera Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the camera capture.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as the face detector input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as the face detector output.<br>
+ The code in the &lt;textarea&gt; is executed when the video starts.
+ You can modify it to experiment further.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width=320 height=240></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width=320 height=240></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let gray = new cv.Mat();
+let cap = new cv.VideoCapture(video);
+let faces = new cv.RectVector();
+let classifier = new cv.CascadeClassifier();
+
+// load pre-trained classifiers
+classifier.load('haarcascade_frontalface_default.xml');
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ src.delete();
+ dst.delete();
+ gray.delete();
+ faces.delete();
+ classifier.delete();
+ return;
+ }
+ let begin = Date.now();
+ // start processing.
+ cap.read(src);
+ src.copyTo(dst);
+ cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
+ // detect faces.
+ classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
+ // draw faces.
+ for (let i = 0; i < faces.size(); ++i) {
+ let face = faces.get(i);
+ let point1 = new cv.Point(face.x, face.y);
+ let point2 = new cv.Point(face.x + face.width, face.y + face.height);
+ cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
+ }
+ cv.imshow('canvasOutput', dst);
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+};
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+let canvasOutput = document.getElementById('canvasOutput');
+let canvasContext = canvasOutput.getContext('2d');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ utils.startCamera('qvga', onVideoStarted, 'videoInput');
+ } else {
+ utils.stopCamera();
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.width = videoInput.videoWidth;
+ videoInput.height = videoInput.videoHeight;
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
+ startAndStop.innerText = 'Start';
+}
+
+let Module = {
+ preRun: [function() {
+ Module.FS_createPreloadedFile(
+ '/', 'haarcascade_frontalface_default.xml',
+ 'haarcascade_frontalface_default.xml', true, false);
+ }],
+};
+
+utils.loadOpenCv(() => {
+ startAndStop.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Gaussian Blur Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Gaussian Blur Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let ksize = new cv.Size(3, 3);
+// You can try more different parameters
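+// sigmaX = 0 lets OpenCV derive sigma from ksize; sigmaY = 0 then reuses sigmaX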
+cv.GaussianBlur(src, dst, ksize, 0, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Bilateral Filter Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Bilateral Filter Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
+// You can try more different parameters
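+// d = 9 px neighborhood; sigmaColor = 75 and sigmaSpace = 75 weight
+// color similarity and spatial distance in the filter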
+cv.bilateralFilter(src, dst, 9, 75, 75, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Blur Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Blur Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let ksize = new cv.Size(3, 3);
+let anchor = new cv.Point(-1, -1);
+// You can try more different parameters
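+// anchor (-1, -1) centers the kernel on the pixel being computed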
+cv.blur(src, dst, ksize, anchor, cv.BORDER_DEFAULT);
+// cv.boxFilter(src, dst, -1, ksize, anchor, true, cv.BORDER_DEFAULT)
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Filter Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Filter Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.Mat.eye(3, 3, cv.CV_32FC1);
+let anchor = new cv.Point(-1, -1);
+// You can try more different parameters
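+// cv.Mat.eye puts ones on the 3x3 diagonal, so each output pixel is the
+// sum of three diagonally adjacent input pixels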
+cv.filter2D(src, dst, cv.CV_8U, M, anchor, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Median Blur Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Median Blur Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can try more different parameters
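+// the kernel size must be an odd integer greater than 1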
+cv.medianBlur(src, dst, 5);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image DFT Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image DFT Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+
+// get optimal size of DFT
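+// sizes that factor into 2s, 3s and 5s let the DFT take its fastest code paths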
+let optimalRows = cv.getOptimalDFTSize(src.rows);
+let optimalCols = cv.getOptimalDFTSize(src.cols);
+let s0 = cv.Scalar.all(0);
+let padded = new cv.Mat();
+cv.copyMakeBorder(src, padded, 0, optimalRows - src.rows, 0,
+ optimalCols - src.cols, cv.BORDER_CONSTANT, s0);
+
+// use cv.MatVector to distribute space for real part and imaginary part
+let plane0 = new cv.Mat();
+padded.convertTo(plane0, cv.CV_32F);
+let planes = new cv.MatVector();
+let complexI = new cv.Mat();
+let plane1 = new cv.Mat.zeros(padded.rows, padded.cols, cv.CV_32F);
+planes.push_back(plane0);
+planes.push_back(plane1);
+cv.merge(planes, complexI);
+
+// in-place DFT transform
+cv.dft(complexI, complexI);
+
+// compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
+cv.split(complexI, planes);
+cv.magnitude(planes.get(0), planes.get(1), planes.get(0));
+let mag = planes.get(0);
+let m1 = new cv.Mat.ones(mag.rows, mag.cols, mag.type());
+cv.add(mag, m1, mag);
+cv.log(mag, mag);
+
+// crop the spectrum, if it has an odd number of rows or columns
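+// (x & -2 clears the lowest bit, i.e. rounds x down to an even number)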
+let rect = new cv.Rect(0, 0, mag.cols & -2, mag.rows & -2);
+mag = mag.roi(rect);
+
+// rearrange the quadrants of Fourier image
+// so that the origin is at the image center
+let cx = mag.cols / 2;
+let cy = mag.rows / 2;
+let tmp = new cv.Mat();
+
+let rect0 = new cv.Rect(0, 0, cx, cy);
+let rect1 = new cv.Rect(cx, 0, cx, cy);
+let rect2 = new cv.Rect(0, cy, cx, cy);
+let rect3 = new cv.Rect(cx, cy, cx, cy);
+
+let q0 = mag.roi(rect0);
+let q1 = mag.roi(rect1);
+let q2 = mag.roi(rect2);
+let q3 = mag.roi(rect3);
+
+// exchange 1 and 4 quadrants
+q0.copyTo(tmp);
+q3.copyTo(q0);
+tmp.copyTo(q3);
+
+// exchange 2 and 3 quadrants
+q1.copyTo(tmp);
+q2.copyTo(q1);
+tmp.copyTo(q2);
+
+// Normalize the float magnitude image to [0, 1] so it displays correctly.
+cv.normalize(mag, mag, 0, 1, cv.NORM_MINMAX);
+
+cv.imshow('canvasOutput', mag);
+src.delete(); padded.delete(); planes.delete(); complexI.delete(); m1.delete(); tmp.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Get Affine Transform Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Get Affine Transform Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// (data32F[0], data32F[1]) is the first point
+// (data32F[2], data32F[3]) is the second point
+// (data32F[4], data32F[5]) is the third point
+let srcTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0, 0, 0, 1, 1, 0]);
+let dstTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0.6, 0.2, 0.1, 1.3, 1.5, 0.3]);
+let dsize = new cv.Size(src.cols, src.rows);
+let M = cv.getAffineTransform(srcTri, dstTri);
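+// M is the unique 2x3 matrix that maps the three source points onto the destination points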
+// You can try more different parameters
+cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Resize Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Resize Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let dsize = new cv.Size(300, 300);
+// You can try more different parameters
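+// cv.INTER_AREA is the preferred interpolation when shrinking an image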
+cv.resize(src, dst, dsize, 0, 0, cv.INTER_AREA);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Rotate Transform Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Rotate Transform Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let dsize = new cv.Size(src.cols, src.rows);
+let center = new cv.Point(src.cols / 2, src.rows / 2);
+// You can try more different parameters
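+// rotate 45 degrees counter-clockwise about the center, keeping scale = 1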
+let M = cv.getRotationMatrix2D(center, 45, 1);
+cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Affine Transform Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Affine Transform Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.matFromArray(2, 3, cv.CV_64FC1, [1, 0, 50, 0, 1, 100]);
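+// [1, 0, 50; 0, 1, 100] translates the image 50 px right and 100 px down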
+let dsize = new cv.Size(src.cols, src.rows);
+// You can try more different parameters
+cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Perspective Transform Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Perspective Transform Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let dsize = new cv.Size(src.cols, src.rows);
+// (data32F[0], data32F[1]) is the first point
+// (data32F[2], data32F[3]) is the second point
+// (data32F[4], data32F[5]) is the third point
+// (data32F[6], data32F[7]) is the fourth point
+let srcTri = cv.matFromArray(4, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387, 389, 390]);
+let dstTri = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300, 300, 300]);
+let M = cv.getPerspectiveTransform(srcTri, dstTri);
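+// a perspective transform needs four point pairs (an affine needs only three)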
+// You can try more different parameters
+cv.warpPerspective(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image GrabCut Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image GrabCut Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
+let mask = new cv.Mat();
+let bgdModel = new cv.Mat();
+let fgdModel = new cv.Mat();
+let rect = new cv.Rect(50, 50, 260, 280);
+cv.grabCut(src, mask, rect, bgdModel, fgdModel, 1, cv.GC_INIT_WITH_RECT);
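+// mask now labels each pixel as background (0), foreground (1),
+// probable background (2) or probable foreground (3)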
+// draw foreground
+for (let i = 0; i < src.rows; i++) {
+ for (let j = 0; j < src.cols; j++) {
+ if (mask.ucharPtr(i, j)[0] == 0 || mask.ucharPtr(i, j)[0] == 2) {
+ src.ucharPtr(i, j)[0] = 0;
+ src.ucharPtr(i, j)[1] = 0;
+ src.ucharPtr(i, j)[2] = 0;
+ }
+ }
+}
+// draw grab rect
+let color = new cv.Scalar(0, 0, 255);
+let point1 = new cv.Point(rect.x, rect.y);
+let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
+cv.rectangle(src, point1, point2, color);
+cv.imshow('canvasOutput', src);
+src.delete(); mask.delete(); bgdModel.delete(); fgdModel.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Laplacian Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Laplacian Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
+// You can try more different parameters
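+// ksize = 1 uses the 3x3 aperture [0 1 0; 1 -4 1; 0 1 0]; scale = 1, delta = 0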
+cv.Laplacian(src, dst, cv.CV_8U, 1, 1, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Sobel Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Sobel Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutputx</b> and <b>canvasOutputy</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutputx"></canvas>
+ <div class="caption">canvasOutputx</div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutputy"></canvas>
+ <div class="caption">canvasOutputy</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dstx = new cv.Mat();
+let dsty = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
+// You can try more different parameters
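+// (dx, dy) = (1, 0) takes the horizontal derivative, (0, 1) the vertical;
+// ksize = 3, scale = 1, delta = 0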
+cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.Sobel(src, dsty, cv.CV_8U, 0, 1, 3, 1, 0, cv.BORDER_DEFAULT);
+// cv.Scharr(src, dstx, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
+// cv.Scharr(src, dsty, cv.CV_8U, 0, 1, 1, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutputx', dstx);
+cv.imshow('canvasOutputy', dsty);
+src.delete(); dstx.delete(); dsty.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image AbsSobel Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image AbsSobel Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutput8U</b> and <b>canvasOutput64F</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput8U"></canvas>
+ <div class="caption">canvasOutput8U</div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput64F"></canvas>
+ <div class="caption">canvasOutput64F</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dstx = new cv.Mat();
+let absDstx = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
+// You can try more different parameters
+cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.Sobel(src, absDstx, cv.CV_64F, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.convertScaleAbs(absDstx, absDstx, 1, 0);
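+// the CV_8U Sobel clips negative gradients to 0, keeping only one edge
+// polarity; taking CV_64F and then its absolute value keeps both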
+cv.imshow('canvasOutput8U', dstx);
+cv.imshow('canvasOutput64F', absDstx);
+src.delete(); dstx.delete(); absDstx.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Back Project Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Back Project Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>srcCanvasInput</b>, <b>dstCanvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="srcCanvasInput"></canvas>
+ <div class="caption">srcCanvasInput <input type="file" id="srcFileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="dstCanvasInput"></canvas>
+ <div class="caption">dstCanvasInput <input type="file" id="dstFileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('srcCanvasInput');
+let dst = cv.imread('dstCanvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGB2HSV, 0);
+cv.cvtColor(dst, dst, cv.COLOR_RGB2HSV, 0);
+let srcVec = new cv.MatVector();
+let dstVec = new cv.MatVector();
+srcVec.push_back(src); dstVec.push_back(dst);
+let backproj = new cv.Mat();
+let none = new cv.Mat();
+let mask = new cv.Mat();
+let hist = new cv.Mat();
+let channels = [0];
+let histSize = [50];
+let ranges = [0, 180];
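+// channel 0 is hue, which spans [0, 180) in 8-bit HSV images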
+let accumulate = false;
+cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
+cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX, -1, none);
+cv.calcBackProject(dstVec, channels, hist, backproj, ranges, 1);
+cv.imshow('canvasOutput', backproj);
+src.delete(); dst.delete(); srcVec.delete(); dstVec.delete();
+backproj.delete(); mask.delete(); hist.delete(); none.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('handSrc.jpg', 'srcCanvasInput');
+utils.loadImageToCanvas('handDst.jpg', 'dstCanvasInput');
+utils.addFileInputHandler('srcFileInput', 'srcCanvasInput');
+utils.addFileInputHandler('dstFileInput', 'dstCanvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Histogram Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Histogram Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+let srcVec = new cv.MatVector();
+srcVec.push_back(src);
+let accumulate = false;
+let channels = [0];
+let histSize = [256];
+let ranges = [0, 255];
+let hist = new cv.Mat();
+let mask = new cv.Mat();
+let color = new cv.Scalar(255, 255, 255);
+let scale = 2;
+// You can try more different parameters
+cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
+let result = cv.minMaxLoc(hist, mask);
+let max = result.maxVal;
+let dst = new cv.Mat.zeros(src.rows, histSize[0] * scale,
+ cv.CV_8UC3);
+// draw histogram
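+// scale each bin so the tallest bar spans the full canvas height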
+for (let i = 0; i < histSize[0]; i++) {
+ let binVal = hist.data32F[i] * src.rows / max;
+ let point1 = new cv.Point(i * scale, src.rows - 1);
+ let point2 = new cv.Point((i + 1) * scale - 1, src.rows - binVal);
+ cv.rectangle(dst, point1, point2, color, cv.FILLED);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); srcVec.delete(); mask.delete(); hist.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image CLAHE Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image CLAHE Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let equalDst = new cv.Mat();
+let claheDst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.equalizeHist(src, equalDst);
+let tileGridSize = new cv.Size(8, 8);
+// You can try more different parameters
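+// clipLimit = 40 caps each tile's histogram to limit noise amplification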
+let clahe = new cv.CLAHE(40, tileGridSize);
+clahe.apply(src, claheDst);
+// equalDst holds the plain equalizeHist result for comparison; show the CLAHE output
+cv.imshow('canvasOutput', claheDst);
+src.delete(); equalDst.delete(); claheDst.delete(); clahe.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Equalize Histogram Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Equalize Histogram Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
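+// equalizeHist expects an 8-bit single-channel image, hence the grayscale conversion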
+cv.equalizeHist(src, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hough Circles Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Hough Circles Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8U);
+let circles = new cv.Mat();
+let color = new cv.Scalar(255, 0, 0);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+// You can try more different parameters
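+// (dp = 1, minDist = 45, param1 = 75 is the Canny threshold,
+// param2 = 40 is the accumulator threshold, min/max radius unbounded)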
+cv.HoughCircles(src, circles, cv.HOUGH_GRADIENT,
+ 1, 45, 75, 40, 0, 0);
+// draw circles
+for (let i = 0; i < circles.cols; ++i) {
+ let x = circles.data32F[i * 3];
+ let y = circles.data32F[i * 3 + 1];
+ let radius = circles.data32F[i * 3 + 2];
+ let center = new cv.Point(x, y);
+ cv.circle(dst, center, radius, color);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); circles.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hough Lines Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Hough Lines Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8U);
+let lines = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(src, src, 50, 200, 3);
+// You can try more different parameters
+cv.HoughLines(src, lines, 1, Math.PI / 180,
+ 30, 0, 0, 0, Math.PI);
+// draw lines
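+// each row holds (rho, theta); extend the line 1000 px in both directions
+// from its foot point (x0, y0) so it crosses the whole canvas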
+for (let i = 0; i < lines.rows; ++i) {
+ let rho = lines.data32F[i * 2];
+ let theta = lines.data32F[i * 2 + 1];
+ let a = Math.cos(theta);
+ let b = Math.sin(theta);
+ let x0 = a * rho;
+ let y0 = b * rho;
+ let startPoint = {x: x0 - 1000 * b, y: y0 + 1000 * a};
+ let endPoint = {x: x0 + 1000 * b, y: y0 - 1000 * a};
+ cv.line(dst, startPoint, endPoint, [255, 0, 0, 255]);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); lines.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image HoughLinesP Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image HoughLinesP Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to experiment further.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8U);
+let lines = new cv.Mat();
+let color = new cv.Scalar(255, 0, 0);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(src, src, 50, 200, 3);
+// You can try more different parameters
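+// (rho = 1 px, theta = 1 degree, threshold = 2, minLineLength = 0, maxLineGap = 0)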
+cv.HoughLinesP(src, lines, 1, Math.PI / 180, 2, 0, 0);
+// draw lines
+for (let i = 0; i < lines.rows; ++i) {
+ let startPoint = new cv.Point(lines.data32S[i * 4], lines.data32S[i * 4 + 1]);
+ let endPoint = new cv.Point(lines.data32S[i * 4 + 2], lines.data32S[i * 4 + 3]);
+ cv.line(dst, startPoint, endPoint, color);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); lines.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Bitwise Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Bitwise Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>imageCanvasInput</b>, <b>logoCanvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="imageCanvasInput"></canvas>
+ <div class="caption">imageCanvasInput <input type="file" id="imageFileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="logoCanvasInput"></canvas>
+ <div class="caption">logoCanvasInput <input type="file" id="logoFileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('imageCanvasInput');
+let logo = cv.imread('logoCanvasInput');
+let dst = new cv.Mat();
+let roi = new cv.Mat();
+let mask = new cv.Mat();
+let maskInv = new cv.Mat();
+let imgBg = new cv.Mat();
+let imgFg = new cv.Mat();
+let sum = new cv.Mat();
+let rect = new cv.Rect(0, 0, logo.cols, logo.rows);
+
+// I want to put the logo in the top-left corner, so I create a ROI
+roi = src.roi(rect);
+
+// Create a mask of logo and create its inverse mask also
+cv.cvtColor(logo, mask, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(mask, mask, 100, 255, cv.THRESH_BINARY);
+cv.bitwise_not(mask, maskInv);
+
+// Black-out the area of logo in ROI
+cv.bitwise_and(roi, roi, imgBg, maskInv);
+
+// Take only region of logo from logo image
+cv.bitwise_and(logo, logo, imgFg, mask);
+
+// Put logo in ROI and modify the main image
+cv.add(imgBg, imgFg, sum);
+
+dst = src.clone();
+// copy every RGBA channel of the blended ROI back into the top-left corner
+for (let i = 0; i < logo.rows; i++) {
+ for (let j = 0; j < logo.cols; j++) {
+ for (let k = 0; k < 4; k++) {
+ dst.ucharPtr(i, j)[k] = sum.ucharPtr(i, j)[k];
+ }
+ }
+}
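+// (an equivalent, loop-free alternative: let dstRoi = dst.roi(rect); sum.copyTo(dstRoi); dstRoi.delete();)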
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); logo.delete(); roi.delete(); mask.delete();
+maskInv.delete(); imgBg.delete(); imgFg.delete(); sum.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'imageCanvasInput');
+utils.loadImageToCanvas('lenaFace.png', 'logoCanvasInput');
+utils.addFileInputHandler('imageFileInput', 'imageCanvasInput');
+utils.addFileInputHandler('logoFileInput', 'logoCanvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Read and Show Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Read and Show Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="8" cols="80" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// To distinguish the input from the output, we convert the image to grayscale.
+// You can try different conversions.
+cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+cv.imshow('canvasOutput', dst);
+src.delete();
+dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Processing Video Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+<style type="text/css">
+.dg {
+ text-align: left;
+}
+.dg .property-name {
+ font: 11px Lucida Grande,sans-serif;
+ line-height: 27px;
+}
+.dg.main .close-button {
+ font: 11px Lucida Grande,sans-serif;
+ line-height: 27px;
+}
+.cell-top {
+ vertical-align: top;
+}
+</style>
+</head>
+<body>
+<h2>Image Processing Video Example</h2>
+<p>
+ Open the controls and try different image processing filters.
+</p>
+<p class="err" id="errorMessage"></p>
+<div id="container">
+ <table>
+ <tr>
+ <td></td>
+ <td>
+ <div>
+ <span>Current Filter: </span><span id="filterName">Pass Through</span>
+ </div>
+ </td>
+ <td>
+ <div>Select Filter:</div>
+ </td>
+ <td></td>
+ </tr>
+ <tr>
+ <td></td>
+ <td class="cell-top">
+ <canvas id="canvasOutput" width="640" height="480"></canvas>
+ </td>
+ <td class="cell-top">
+ <div id="guiContainer"></div>
+ </td>
+ <td></td>
+ </tr>
+ </table>
+ <div>
+ <video id="videoInput" class="hidden">Your browser does not support the video tag.</video>
+ </div>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="https://cdnjs.cloudflare.com/ajax/libs/stats.js/r16/Stats.min.js" type="text/javascript"></script>
+<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.6.4/dat.gui.min.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+let width = 0;
+let height = 0;
+
+let resolution = window.innerWidth < 960 ? 'qvga' : 'vga';
+
+// whether video is streaming from the camera.
+let streaming = false;
+
+let video = document.getElementById('videoInput');
+let vc = null;
+
+let container = document.getElementById('container');
+
+let lastFilter = '';
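+// these Mats are allocated once in startVideoProcessing() and reused by every filter to avoid per-frame allocation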
+let src = null;
+let dstC1 = null;
+let dstC3 = null;
+let dstC4 = null;
+
+function startVideoProcessing() {
+ src = new cv.Mat(height, width, cv.CV_8UC4);
+ dstC1 = new cv.Mat(height, width, cv.CV_8UC1);
+ dstC3 = new cv.Mat(height, width, cv.CV_8UC3);
+ dstC4 = new cv.Mat(height, width, cv.CV_8UC4);
+ requestAnimationFrame(processVideo);
+}
+
+function passThrough(src) {
+ return src;
+}
+
+function gray(src) {
+ cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
+ return dstC1;
+}
+
+function hsv(src) {
+ cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
+ cv.cvtColor(dstC3, dstC3, cv.COLOR_RGB2HSV);
+ return dstC3;
+}
+
+function canny(src) {
+ cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
+ cv.Canny(dstC1, dstC1, controls.cannyThreshold1, controls.cannyThreshold2,
+ controls.cannyApertureSize, controls.cannyL2Gradient);
+ return dstC1;
+}
+
+function inRange(src) {
+ let lowValue = controls.inRangeLow;
+ let lowScalar = new cv.Scalar(lowValue, lowValue, lowValue, 255);
+ let highValue = controls.inRangeHigh;
+ let highScalar = new cv.Scalar(highValue, highValue, highValue, 255);
+ let low = new cv.Mat(height, width, src.type(), lowScalar);
+ let high = new cv.Mat(height, width, src.type(), highScalar);
+ cv.inRange(src, low, high, dstC1);
+ low.delete(); high.delete();
+ return dstC1;
+}
+
+function threshold(src) {
+ cv.threshold(src, dstC4, controls.thresholdValue, 200, cv.THRESH_BINARY);
+ return dstC4;
+}
+
+function adaptiveThreshold(src) {
+ let mat = new cv.Mat(height, width, cv.CV_8U);
+ cv.cvtColor(src, mat, cv.COLOR_RGBA2GRAY);
+ cv.adaptiveThreshold(mat, dstC1, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
+ cv.THRESH_BINARY, Number(controls.adaptiveBlockSize), 2);
+ mat.delete();
+ return dstC1;
+}
+
+function gaussianBlur(src) {
+ cv.GaussianBlur(src, dstC4,
+ {width: controls.gaussianBlurSize, height: controls.gaussianBlurSize},
+ 0, 0, cv.BORDER_DEFAULT);
+ return dstC4;
+}
+
+function bilateralFilter(src) {
+ let mat = new cv.Mat(height, width, cv.CV_8UC3);
+ cv.cvtColor(src, mat, cv.COLOR_RGBA2RGB);
+ cv.bilateralFilter(mat, dstC3, controls.bilateralFilterDiameter, controls.bilateralFilterSigma,
+ controls.bilateralFilterSigma, cv.BORDER_DEFAULT);
+ mat.delete();
+ return dstC3;
+}
+
+function medianBlur(src) {
+ cv.medianBlur(src, dstC4, controls.medianBlurSize);
+ return dstC4;
+}
+
+function sobel(src) {
+ let mat = new cv.Mat(height, width, cv.CV_8UC1);
+ cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY, 0);
+ cv.Sobel(mat, dstC1, cv.CV_8U, 1, 0, controls.sobelSize, 1, 0, cv.BORDER_DEFAULT);
+ mat.delete();
+ return dstC1;
+}
+
+function scharr(src) {
+ let mat = new cv.Mat(height, width, cv.CV_8UC1);
+ cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY, 0);
+ cv.Scharr(mat, dstC1, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
+ mat.delete();
+ return dstC1;
+}
+
+function laplacian(src) {
+ let mat = new cv.Mat(height, width, cv.CV_8UC1);
+ cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY);
+ cv.Laplacian(mat, dstC1, cv.CV_8U, controls.laplacianSize, 1, 0, cv.BORDER_DEFAULT);
+ mat.delete();
+ return dstC1;
+}
+
+let contoursColor = [];
+for (let i = 0; i < 10000; i++) {
+ contoursColor.push([Math.round(Math.random() * 255),
+ Math.round(Math.random() * 255),
+ Math.round(Math.random() * 255), 0]);
+}
+
+function contours(src) {
+ cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
+ cv.threshold(dstC1, dstC4, 120, 200, cv.THRESH_BINARY);
+ let contours = new cv.MatVector();
+ let hierarchy = new cv.Mat();
+ cv.findContours(dstC4, contours, hierarchy,
+ Number(controls.contoursMode),
+ Number(controls.contoursMethod), {x: 0, y: 0});
+ dstC3.delete();
+ dstC3 = cv.Mat.ones(height, width, cv.CV_8UC3);
+ for (let i = 0; i < contours.size(); ++i) {
+ let color = contoursColor[i];
+ cv.drawContours(dstC3, contours, i, color, 1, cv.LINE_8, hierarchy);
+ }
+ contours.delete(); hierarchy.delete();
+ return dstC3;
+}
+
+function calcHist(src) {
+ cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
+ let srcVec = new cv.MatVector();
+ srcVec.push_back(dstC1);
+ let scale = 2;
+ let channels = [0];
+ let histSize = [src.cols/scale];
+ const ranges = [0, 255];
+ let hist = new cv.Mat();
+ let mask = new cv.Mat();
+ let color = new cv.Scalar(0xfb, 0xca, 0x04, 0xff);
+ cv.calcHist(srcVec, channels, mask, hist, histSize, ranges);
+ let result = cv.minMaxLoc(hist, mask);
+ let max = result.maxVal;
+ cv.cvtColor(dstC1, dstC4, cv.COLOR_GRAY2RGBA);
+ // draw histogram on src
+ for (let i = 0; i < histSize[0]; i++) {
+ let binVal = hist.data32F[i] * src.rows / max;
+ cv.rectangle(dstC4, {x: i * scale, y: src.rows - 1},
+ {x: (i + 1) * scale - 1, y: src.rows - binVal/3}, color, cv.FILLED);
+ }
+ srcVec.delete();
+ mask.delete();
+ hist.delete();
+ return dstC4;
+}
+
+function equalizeHist(src) {
+ cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY, 0);
+ cv.equalizeHist(dstC1, dstC1);
+ return dstC1;
+}
+
+let base;
+
+function backprojection(src) {
+ if (lastFilter !== 'backprojection') {
+ if (base instanceof cv.Mat) {
+ base.delete();
+ }
+ base = src.clone();
+ cv.cvtColor(base, base, cv.COLOR_RGB2HSV, 0);
+ }
+ cv.cvtColor(src, dstC3, cv.COLOR_RGB2HSV, 0);
+ let baseVec = new cv.MatVector();
+ let targetVec = new cv.MatVector();
+ baseVec.push_back(base); targetVec.push_back(dstC3);
+ let mask = new cv.Mat();
+ let hist = new cv.Mat();
+ let channels = [0];
+ let histSize = [50];
+ let ranges;
+ if (controls.backprojectionRangeLow < controls.backprojectionRangeHigh) {
+ ranges = [controls.backprojectionRangeLow, controls.backprojectionRangeHigh];
+ } else {
+ return src;
+ }
+ cv.calcHist(baseVec, channels, mask, hist, histSize, ranges);
+ cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX);
+ cv.calcBackProject(targetVec, channels, hist, dstC1, ranges, 1);
+ baseVec.delete();
+ targetVec.delete();
+ mask.delete();
+ hist.delete();
+ return dstC1;
+}
+
+function erosion(src) {
+ let kernelSize = controls.erosionSize;
+ let kernel = cv.Mat.ones(kernelSize, kernelSize, cv.CV_8U);
+ let color = new cv.Scalar();
+ cv.erode(src, dstC4, kernel, {x: -1, y: -1}, 1, Number(controls.erosionBorderType), color);
+ kernel.delete();
+ return dstC4;
+}
+
+function dilation(src) {
+ let kernelSize = controls.dilationSize;
+ let kernel = cv.Mat.ones(kernelSize, kernelSize, cv.CV_8U);
+ let color = new cv.Scalar();
+ cv.dilate(src, dstC4, kernel, {x: -1, y: -1}, 1, Number(controls.dilationBorderType), color);
+ kernel.delete();
+ return dstC4;
+}
+
+function morphology(src) {
+ let kernelSize = controls.morphologySize;
+ let kernel = cv.getStructuringElement(Number(controls.morphologyShape),
+ {width: kernelSize, height: kernelSize});
+ let color = new cv.Scalar();
+ let op = Number(controls.morphologyOp);
+ let image = src;
+ if (op === cv.MORPH_GRADIENT || op === cv.MORPH_TOPHAT || op === cv.MORPH_BLACKHAT) {
+ cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
+ image = dstC3;
+ }
+ cv.morphologyEx(image, dstC4, op, kernel, {x: -1, y: -1}, 1,
+ Number(controls.morphologyBorderType), color);
+ kernel.delete();
+ return dstC4;
+}
+
+function processVideo() {
+ if (!streaming) return;
+ stats.begin();
+ vc.read(src);
+ let result;
+ switch (controls.filter) {
+ case 'passThrough': result = passThrough(src); break;
+ case 'gray': result = gray(src); break;
+ case 'hsv': result = hsv(src); break;
+ case 'canny': result = canny(src); break;
+ case 'inRange': result = inRange(src); break;
+ case 'threshold': result = threshold(src); break;
+ case 'adaptiveThreshold': result = adaptiveThreshold(src); break;
+ case 'gaussianBlur': result = gaussianBlur(src); break;
+ case 'bilateralFilter': result = bilateralFilter(src); break;
+ case 'medianBlur': result = medianBlur(src); break;
+ case 'sobel': result = sobel(src); break;
+ case 'scharr': result = scharr(src); break;
+ case 'laplacian': result = laplacian(src); break;
+ case 'contours': result = contours(src); break;
+ case 'calcHist': result = calcHist(src); break;
+ case 'equalizeHist': result = equalizeHist(src); break;
+ case 'backprojection': result = backprojection(src); break;
+ case 'erosion': result = erosion(src); break;
+ case 'dilation': result = dilation(src); break;
+ case 'morphology': result = morphology(src); break;
+ default: result = passThrough(src);
+ }
+ cv.imshow('canvasOutput', result);
+ stats.end();
+ lastFilter = controls.filter;
+ requestAnimationFrame(processVideo);
+}
+
+let stats = null;
+
+let filters = {
+ 'passThrough': 'Pass Through',
+ 'gray': 'Gray',
+ 'hsv': 'HSV',
+ 'canny': 'Canny Edge Detection',
+ 'inRange': 'In Range',
+ 'threshold': 'Threshold',
+ 'adaptiveThreshold': 'Adaptive Threshold',
+ 'gaussianBlur': 'Gaussian Blurring',
+ 'medianBlur': 'Median Blurring',
+ 'bilateralFilter': 'Bilateral Filtering',
+ 'sobel': 'Sobel Derivatives',
+ 'scharr': 'Scharr Derivatives',
+ 'laplacian': 'Laplacian Derivatives',
+ 'contours': 'Contours',
+ 'calcHist': 'Calculation',
+ 'equalizeHist': 'Equalization',
+ 'backprojection': 'Backprojection',
+ 'erosion': 'Erosion',
+ 'dilation': 'Dilation',
+ 'morphology': 'Morphology',
+};
+
+let filterName = document.getElementById('filterName');
+
+let controls;
+
+function initUI() {
+ stats = new Stats();
+ stats.showPanel(0);
+ container.appendChild(stats.domElement);
+ stats.domElement.style.position = 'absolute';
+ stats.domElement.style.right = '0px';
+ stats.domElement.style.top = '0px';
+
+ controls = {
+ filter: 'passThrough',
+ setFilter: function(filter) {
+ this.filter = filter;
+ filterName.innerHTML = filters[filter];
+ },
+ passThrough: function() {
+ this.setFilter('passThrough');
+ },
+ gray: function() {
+ this.setFilter('gray');
+ },
+ hsv: function() {
+ this.setFilter('hsv');
+ },
+ inRange: function() {
+ this.setFilter('inRange');
+ },
+ inRangeLow: 75,
+ inRangeHigh: 150,
+ threshold: function() {
+ this.setFilter('threshold');
+ },
+ thresholdValue: 100,
+ adaptiveThreshold: function() {
+ this.setFilter('adaptiveThreshold');
+ },
+ adaptiveBlockSize: 3,
+ gaussianBlur: function() {
+ this.setFilter('gaussianBlur');
+ },
+ gaussianBlurSize: 7,
+ medianBlur: function() {
+ this.setFilter('medianBlur');
+ },
+ medianBlurSize: 5,
+ bilateralFilter: function() {
+ this.setFilter('bilateralFilter');
+ },
+ bilateralFilterDiameter: 5,
+ bilateralFilterSigma: 75,
+ sobel: function() {
+ this.setFilter('sobel');
+ },
+ sobelSize: 3,
+ scharr: function() {
+ this.setFilter('scharr');
+ },
+ laplacian: function() {
+ this.setFilter('laplacian');
+ },
+ laplacianSize: 3,
+ canny: function() {
+ this.setFilter('canny');
+ },
+ cannyThreshold1: 150,
+ cannyThreshold2: 300,
+ cannyApertureSize: 3,
+ cannyL2Gradient: false,
+ contours: function() {
+ this.setFilter('contours');
+ },
+ contoursMode: cv.RETR_CCOMP,
+ contoursMethod: cv.CHAIN_APPROX_SIMPLE,
+ calcHist: function() {
+ this.setFilter('calcHist');
+ },
+ equalizeHist: function() {
+ this.setFilter('equalizeHist');
+ },
+ backprojection: function() {
+ this.setFilter('backprojection');
+ },
+ backprojectionRangeLow: 0,
+ backprojectionRangeHigh: 150,
+ morphology: function() {
+ this.setFilter('morphology');
+ },
+ morphologyShape: cv.MORPH_RECT,
+ morphologyOp: cv.MORPH_ERODE,
+ morphologySize: 5,
+ morphologyBorderType: cv.BORDER_CONSTANT,
+ };
+
+ let gui = new dat.GUI({autoPlace: false});
+ let guiContainer = document.getElementById('guiContainer');
+ guiContainer.appendChild(gui.domElement);
+
+ let lastFolder = null;
+ function closeLastFolder(folder) {
+ if (lastFolder !== null && lastFolder !== folder) {
+ lastFolder.close();
+ }
+ lastFolder = folder;
+ }
+
+ gui.add(controls, 'passThrough').name(filters['passThrough']).onChange(function() {
+ closeLastFolder(null);
+ });
+
+ let colorConversion = gui.addFolder('Color Conversion');
+ colorConversion.add(controls, 'gray').name(filters['gray']).onChange(function() {
+ closeLastFolder(null);
+ });
+
+ colorConversion.add(controls, 'hsv').name(filters['hsv']).onChange(function() {
+ closeLastFolder(null);
+ });
+
+ let inRange = colorConversion.addFolder(filters['inRange']);
+ inRange.domElement.onclick = function() {
+ closeLastFolder(inRange);
+ controls.inRange();
+ };
+ inRange.add(controls, 'inRangeLow', 0, 255, 1).name('lower boundary');
+ inRange.add(controls, 'inRangeHigh', 0, 255, 1).name('higher boundary');
+
+ // let geometricTransformations = gui.addFolder('Geometric Transformations');
+ // TODO
+
+ let thresholding = gui.addFolder('Thresholding');
+
+ let threshold = thresholding.addFolder(filters['threshold']);
+ threshold.domElement.onclick = function() {
+ closeLastFolder(threshold);
+ controls.threshold();
+ };
+ threshold.add(controls, 'thresholdValue', 0, 200, 1).name('threshold value');
+
+ let adaptiveThreshold = thresholding.addFolder(filters['adaptiveThreshold']);
+ adaptiveThreshold.domElement.onclick = function() {
+ closeLastFolder(adaptiveThreshold);
+ controls.adaptiveThreshold();
+ };
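+ // the block size must be odd, so even slider values are bumped to the next odd number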
+ adaptiveThreshold.add(
+ controls, 'adaptiveBlockSize', 3, 99, 1).name('block size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.adaptiveBlockSize = value + 1;
+ });
+
+ let smoothing = gui.addFolder('Smoothing');
+
+ let gaussianBlur = smoothing.addFolder(filters['gaussianBlur']);
+ gaussianBlur.domElement.onclick = function() {
+ closeLastFolder(gaussianBlur);
+ controls.gaussianBlur();
+ };
+ gaussianBlur.add(
+ controls, 'gaussianBlurSize', 7, 99, 1).name('kernel size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.gaussianBlurSize = value + 1;
+ });
+
+ let medianBlur = smoothing.addFolder(filters['medianBlur']);
+ medianBlur.domElement.onclick = function() {
+ closeLastFolder(medianBlur);
+ controls.medianBlur();
+ };
+ medianBlur.add(
+ controls, 'medianBlurSize', 3, 99, 1).name('kernel size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.medianBlurSize = value + 1;
+ });
+
+ let bilateralFilter = smoothing.addFolder(filters['bilateralFilter']);
+ bilateralFilter.domElement.onclick = function() {
+ closeLastFolder(bilateralFilter);
+ controls.bilateralFilter();
+ };
+ bilateralFilter.add(controls, 'bilateralFilterDiameter', 1, 15, 1).name('diameter');
+ bilateralFilter.add(controls, 'bilateralFilterSigma', 1, 255, 1).name('sigma');
+
+ let morphology = gui.addFolder('Morphology');
+ morphology.domElement.onclick = function() {
+ closeLastFolder(morphology);
+ controls.morphology();
+ };
+ morphology.add(
+ controls, 'morphologyOp',
+ {'MORPH_ERODE': cv.MORPH_ERODE,
+ 'MORPH_DILATE': cv.MORPH_DILATE,
+ 'MORPH_OPEN': cv.MORPH_OPEN,
+ 'MORPH_CLOSE': cv.MORPH_CLOSE,
+ 'MORPH_GRADIENT': cv.MORPH_GRADIENT,
+ 'MORPH_TOPHAT': cv.MORPH_TOPHAT,
+ 'MORPH_BLACKHAT': cv.MORPH_BLACKHAT}).name('operation');
+ morphology.add(
+ controls, 'morphologyShape',
+ {'MORPH_RECT': cv.MORPH_RECT,
+ 'MORPH_CROSS': cv.MORPH_CROSS,
+ 'MORPH_ELLIPSE': cv.MORPH_ELLIPSE}).name('shape');
+ morphology.add(
+ controls, 'morphologySize', 1, 15, 1).name('kernel size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.morphologySize = value + 1;
+ });
+ morphology.add(
+ controls, 'morphologyBorderType',
+ {'BORDER_CONSTANT': cv.BORDER_CONSTANT,
+ 'BORDER_REPLICATE': cv.BORDER_REPLICATE,
+ 'BORDER_REFLECT': cv.BORDER_REFLECT,
+ 'BORDER_REFLECT_101': cv.BORDER_REFLECT_101}).name('border type');
+
+ let gradients = gui.addFolder('Gradients');
+ let sobel = gradients.addFolder(filters['sobel']);
+ sobel.domElement.onclick = function() {
+ closeLastFolder(sobel);
+ controls.sobel();
+ };
+ sobel.add(controls, 'sobelSize', 3, 19, 1).name('kernel size').onChange(function(value) {
+ if (value % 2 === 0) controls.sobelSize = value + 1;
+ });
+
+ gradients.add(controls, 'scharr').name(filters['scharr']).onChange(function() {
+ closeLastFolder(null);
+ });
+
+ let laplacian = gradients.addFolder(filters['laplacian']);
+ laplacian.domElement.onclick = function() {
+ closeLastFolder(laplacian);
+ controls.laplacian();
+ };
+ laplacian.add(
+ controls, 'laplacianSize', 1, 19, 1).name('kernel size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.laplacianSize = value + 1;
+ });
+
+ let canny = gui.addFolder(filters['canny']);
+ canny.domElement.onclick = function() {
+ closeLastFolder(canny);
+ controls.canny();
+ };
+ canny.add(controls, 'cannyThreshold1', 1, 500, 1).name('threshold1');
+ canny.add(controls, 'cannyThreshold2', 1, 500, 1).name('threshold2');
+ canny.add(controls, 'cannyApertureSize', 3, 7, 1).name('aperture size').onChange(
+ function(value) {
+ if (value % 2 === 0) controls.cannyApertureSize = value + 1;
+ });
+ canny.add(controls, 'cannyL2Gradient').name('l2 gradient');
+
+ let contours = gui.addFolder(filters['contours']);
+ contours.domElement.onclick = function() {
+ closeLastFolder(contours);
+ controls.contours();
+ };
+ contours.add(
+ controls, 'contoursMode',
+ {'RETR_EXTERNAL': cv.RETR_EXTERNAL,
+ 'RETR_LIST': cv.RETR_LIST,
+ 'RETR_CCOMP': cv.RETR_CCOMP,
+ 'RETR_TREE': cv.RETR_TREE}).name('mode');
+ contours.add(
+ controls, 'contoursMethod',
+ {'CHAIN_APPROX_NONE': cv.CHAIN_APPROX_NONE,
+ 'CHAIN_APPROX_SIMPLE': cv.CHAIN_APPROX_SIMPLE,
+ 'CHAIN_APPROX_TC89_L1': cv.CHAIN_APPROX_TC89_L1,
+ 'CHAIN_APPROX_TC89_KCOS': cv.CHAIN_APPROX_TC89_KCOS}).name('method');
+
+ let histograms = gui.addFolder('Histograms');
+ histograms.add(controls, 'calcHist').name(filters['calcHist']).onChange(function() {
+ closeLastFolder(null);
+ });
+ histograms.add(controls, 'equalizeHist').name(filters['equalizeHist']).onChange(function() {
+ closeLastFolder(null);
+ });
+
+ let backprojection = histograms.addFolder(filters['backprojection']);
+ backprojection.domElement.onclick = function() {
+ closeLastFolder(backprojection);
+ controls.backprojection();
+ };
+ backprojection.add(controls, 'backprojectionRangeLow', 0, 255, 1).name('range low');
+ backprojection.add(controls, 'backprojectionRangeHigh', 0, 255, 1).name('range high');
+}
+
+function startCamera() {
+ if (!streaming) {
+ utils.clearError();
+ utils.startCamera(resolution, onVideoStarted, 'videoInput');
+ } else {
+ utils.stopCamera();
+ onVideoStopped();
+ }
+}
+
+function onVideoStarted() {
+ height = video.videoHeight;
+ width = video.videoWidth;
+ video.setAttribute('width', width);
+ video.setAttribute('height', height);
+ streaming = true;
+ vc = new cv.VideoCapture(video);
+ startVideoProcessing();
+}
+
+function stopVideoProcessing() {
+ if (src != null && !src.isDeleted()) src.delete();
+ if (dstC1 != null && !dstC1.isDeleted()) dstC1.delete();
+ if (dstC3 != null && !dstC3.isDeleted()) dstC3.delete();
+ if (dstC4 != null && !dstC4.isDeleted()) dstC4.delete();
+}
+
+function onVideoStopped() {
+ if (!streaming) return;
+ stopVideoProcessing();
+ document.getElementById('canvasOutput').getContext('2d').clearRect(0, 0, width, height);
+ streaming = false;
+}
+
+utils.loadOpenCv(() => {
+ initUI();
+ startCamera();
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>MeanShift Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>MeanShift Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the video.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as the meanShift input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as the meanShift output.<br>
+ The code in the &lt;textarea&gt; will be executed when the video starts.
+ You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width="320" height="240" muted loop></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width="320" height="240" ></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+
+// take the first frame of the video
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+cap.read(frame);
+
+// hardcode the initial location of the window
+let trackWindow = new cv.Rect(150, 60, 63, 125);
+
+// set up the ROI for tracking
+let roi = frame.roi(trackWindow);
+let hsvRoi = new cv.Mat();
+cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
+cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
+let mask = new cv.Mat();
+let lowScalar = new cv.Scalar(30, 30, 0);
+let highScalar = new cv.Scalar(180, 180, 180);
+let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
+let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
+cv.inRange(hsvRoi, low, high, mask);
+let roiHist = new cv.Mat();
+let hsvRoiVec = new cv.MatVector();
+hsvRoiVec.push_back(hsvRoi);
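+// build a histogram of the hue channel only: 180 bins covering the hue range [0, 180)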
+cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
+cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
+
+// delete the Mats we no longer need.
+roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
+
+// Set up the termination criteria: either 10 iterations or a move of at least 1 pt
+let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
+
+let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
+let dst = new cv.Mat();
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv);
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
+ return;
+ }
+ let begin = Date.now();
+
+ // start processing.
+ cap.read(frame);
+ cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
+ cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
+ cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
+
+ // Apply meanshift to get the new location
+ // and it also returns number of iterations meanShift took to converge,
+ // which is useless in this demo.
+ [, trackWindow] = cv.meanShift(dst, trackWindow, termCrit);
+
+ // Draw it on image
+ let [x, y, w, h] = [trackWindow.x, trackWindow.y, trackWindow.width, trackWindow.height];
+ cv.rectangle(frame, new cv.Point(x, y), new cv.Point(x+w, y+h), [255, 0, 0, 255], 2);
+ cv.imshow('canvasOutput', frame);
+
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+};
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+let canvasOutput = document.getElementById('canvasOutput');
+let canvasContext = canvasOutput.getContext('2d');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ videoInput.play().then(() => {
+ onVideoStarted();
+ });
+ } else {
+ videoInput.pause();
+ videoInput.currentTime = 0;
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
+ startAndStop.innerText = 'Start';
+}
+
+utils.loadOpenCv(() => {
+ videoInput.addEventListener('canplay', () => {
+ startAndStop.removeAttribute('disabled');
+ });
+ videoInput.src = 'cup.mp4';
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Black Hat Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Black Hat Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
+let dst = new cv.Mat();
+let M = cv.Mat.ones(53, 53, cv.CV_8U);
+// You can try different parameters
+cv.morphologyEx(src, dst, cv.MORPH_BLACKHAT, M);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Closing Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Closing Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+// You can try different parameters
+cv.morphologyEx(src, dst, cv.MORPH_CLOSE, M);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Dilate Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Dilate Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+let anchor = new cv.Point(-1, -1);
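+// an anchor of (-1, -1) means the anchor sits at the kernel center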
+// You can try different parameters
+cv.dilate(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Erode Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Erode Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+let anchor = new cv.Point(-1, -1);
+// You can try different parameters
+cv.erode(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Get Structuring Element Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Get Structuring Element Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
+let dst = new cv.Mat();
+let ksize = new cv.Size(5, 5);
+// You can try different parameters
+let M = cv.getStructuringElement(cv.MORPH_CROSS, ksize);
+cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Gradient Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Gradient Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+// You can try different parameters
+cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Opening Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Opening Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+let anchor = new cv.Point(-1, -1);
+// You can try different parameters
+cv.morphologyEx(src, dst, cv.MORPH_OPEN, M, anchor, 1,
+ cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Top Hat Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Top Hat Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
+let dst = new cv.Mat();
+let M = cv.Mat.ones(9, 9, cv.CV_8U);
+// You can try different parameters
+cv.morphologyEx(src, dst, cv.MORPH_TOPHAT, M);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('shape.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Dense Optical Flow Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Dense Optical Flow Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the video.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
+ We get a 2-channel array of optical flow vectors (u, v) and find their magnitude and direction.
+ The result is color-coded for better visualization: direction corresponds to the hue of the image,
+ and magnitude corresponds to the value plane.<br>
+ The code in the &lt;textarea&gt; will be executed when the video starts.<br>
+ You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width="320" height="240" muted></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width="320" height="240" ></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+
+// take the first frame of the video
+let frame1 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+cap.read(frame1);
+
+let prvs = new cv.Mat();
+cv.cvtColor(frame1, prvs, cv.COLOR_RGBA2GRAY);
+frame1.delete();
+let hsv = new cv.Mat();
+let hsv0 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
+let hsv1 = new cv.Mat(video.height, video.width, cv.CV_8UC1, new cv.Scalar(255));
+let hsv2 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv0); hsvVec.push_back(hsv1); hsvVec.push_back(hsv2);
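+// hsv1 holds the saturation plane, fixed at 255; hue (hsv0) and value (hsv2) are filled per frame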
+
+let frame2 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let next = new cv.Mat(video.height, video.width, cv.CV_8UC1);
+let flow = new cv.Mat(video.height, video.width, cv.CV_32FC2);
+let flowVec = new cv.MatVector();
+let mag = new cv.Mat(video.height, video.width, cv.CV_32FC1);
+let ang = new cv.Mat(video.height, video.width, cv.CV_32FC1);
+let rgb = new cv.Mat(video.height, video.width, cv.CV_8UC3);
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ prvs.delete(); hsv.delete(); hsv0.delete(); hsv1.delete(); hsv2.delete();
+ hsvVec.delete(); frame2.delete(); flow.delete(); flowVec.delete(); next.delete();
+ mag.delete(); ang.delete(); rgb.delete();
+ return;
+ }
+ let begin = Date.now();
+
+ // start processing.
+ cap.read(frame2);
+ cv.cvtColor(frame2, next, cv.COLOR_RGBA2GRAY);
+ cv.calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+ cv.split(flow, flowVec);
+ let u = flowVec.get(0);
+ let v = flowVec.get(1);
+ cv.cartToPolar(u, v, mag, ang);
+ u.delete(); v.delete();
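+ // map the angle from radians [0, 2*pi) to OpenCV's 8-bit hue range [0, 180)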
+ ang.convertTo(hsv0, cv.CV_8UC1, 180/Math.PI/2);
+ cv.normalize(mag, hsv2, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1);
+ cv.merge(hsvVec, hsv);
+ cv.cvtColor(hsv, rgb, cv.COLOR_HSV2RGB);
+ cv.imshow('canvasOutput', rgb);
+ next.copyTo(prvs);
+
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+};
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ videoInput.play().then(() => {
+ onVideoStarted();
+ });
+ } else {
+ videoInput.pause();
+ videoInput.currentTime = 0;
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ startAndStop.innerText = 'Start';
+}
+
+videoInput.addEventListener('ended', () => {
+ onVideoStopped();
+});
+
+utils.loadOpenCv(() => {
+ videoInput.addEventListener('canplay', () => {
+ startAndStop.removeAttribute('disabled');
+ });
+ videoInput.src = 'box.mp4';
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Lucas-Kanade Optical Flow Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Lucas-Kanade Optical Flow Example</h2>
+<p>
+ Click the <b>Start/Stop</b> button to start or stop the video.<br>
+ The <b>videoInput</b> is a &lt;video&gt; element used as input.
+ The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
+ To decide which points to track, we use <b>cv.goodFeaturesToTrack()</b>: we take the first frame, detect some Shi-Tomasi corner points in it, and then iteratively track those points with <b>cv.calcOpticalFlowPyrLK()</b>.<br>
+ The code in the &lt;textarea&gt; will be executed when the video starts.<br>
+ You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width="320" height="240" muted></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width="320" height="240" ></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+
+// parameters for ShiTomasi corner detection
+let [maxCorners, qualityLevel, minDistance, blockSize] = [30, 0.3, 7, 7];
+
+// parameters for lucas kanade optical flow
+let winSize = new cv.Size(15, 15);
+let maxLevel = 2;
+let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03);
+
+// create some random colors
+let color = [];
+for (let i = 0; i < maxCorners; i++) {
+ color.push(new cv.Scalar(parseInt(Math.random()*255), parseInt(Math.random()*255),
+ parseInt(Math.random()*255), 255));
+}
+
+// take the first frame and find corners in it
+let oldFrame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+cap.read(oldFrame);
+let oldGray = new cv.Mat();
+cv.cvtColor(oldFrame, oldGray, cv.COLOR_RGBA2GRAY);
+let p0 = new cv.Mat();
+let none = new cv.Mat();
+cv.goodFeaturesToTrack(oldGray, p0, maxCorners, qualityLevel, minDistance, none, blockSize);
+
+// Create a mask image for drawing purposes
+let zeroEle = new cv.Scalar(0, 0, 0, 255);
+let mask = new cv.Mat(oldFrame.rows, oldFrame.cols, oldFrame.type(), zeroEle);
+
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let frameGray = new cv.Mat();
+let p1 = new cv.Mat();
+let st = new cv.Mat();
+let err = new cv.Mat();
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ frame.delete(); oldGray.delete(); p0.delete(); p1.delete(); err.delete(); mask.delete();
+ frameGray.delete(); st.delete(); none.delete(); oldFrame.delete();
+ return;
+ }
+ let begin = Date.now();
+
+ // start processing.
+ cap.read(frame);
+ cv.cvtColor(frame, frameGray, cv.COLOR_RGBA2GRAY);
+
+ // calculate optical flow
+ cv.calcOpticalFlowPyrLK(oldGray, frameGray, p0, p1, st, err, winSize, maxLevel, criteria);
+
+ // select good points
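+ // st.data[i] is 1 when the flow for the corresponding feature has been found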
+ let goodNew = [];
+ let goodOld = [];
+ for (let i = 0; i < st.rows; i++) {
+ if (st.data[i] === 1) {
+ goodNew.push(new cv.Point(p1.data32F[i*2], p1.data32F[i*2+1]));
+ goodOld.push(new cv.Point(p0.data32F[i*2], p0.data32F[i*2+1]));
+ }
+ }
+
+ // draw the tracks
+ for (let i = 0; i < goodNew.length; i++) {
+ cv.line(mask, goodNew[i], goodOld[i], color[i], 2);
+ cv.circle(frame, goodNew[i], 5, color[i], -1);
+ }
+ cv.add(frame, mask, frame);
+
+ cv.imshow('canvasOutput', frame);
+
+ // now update the previous frame and previous points
+ frameGray.copyTo(oldGray);
+ p0.delete(); p0 = null;
+ p0 = new cv.Mat(goodNew.length, 1, cv.CV_32FC2);
+ for (let i = 0; i < goodNew.length; i++) {
+ p0.data32F[i*2] = goodNew[i].x;
+ p0.data32F[i*2+1] = goodNew[i].y;
+ }
+
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+};
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ videoInput.play().then(() => {
+ onVideoStarted();
+ });
+ } else {
+ videoInput.pause();
+ videoInput.currentTime = 0;
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ startAndStop.innerText = 'Start';
+}
+
+videoInput.addEventListener('ended', () => {
+ onVideoStopped();
+});
+
+utils.loadOpenCv(() => {
+ videoInput.addEventListener('canplay', () => {
+ startAndStop.removeAttribute('disabled');
+ });
+ videoInput.src = 'box.mp4';
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image PyrDown Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image PyrDown Example</h2>
+<p>
+ &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+ Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+ You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can try more different parameters
+cv.pyrDown(src, dst, new cv.Size(0, 0), cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image PyrUp Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image PyrUp Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can try more different parameters
+cv.pyrUp(src, dst, new cv.Size(0, 0), cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hello OpenCV.js</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Hello OpenCV.js</h2>
+<p id="status">OpenCV.js is loading...</p>
+<div>
+ <div class="inputoutput">
+ <img id="imageSrc" alt="No Image" />
+ <div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput" ></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script type="text/javascript">
+let imgElement = document.getElementById('imageSrc');
+let inputElement = document.getElementById('fileInput');
+inputElement.addEventListener('change', (e) => {
+ imgElement.src = URL.createObjectURL(e.target.files[0]);
+}, false);
+
+imgElement.onload = function() {
+ let mat = cv.imread(imgElement);
+ cv.imshow('canvasOutput', mat);
+ mat.delete();
+};
+
+function onOpenCvReady() { // eslint-disable-line no-unused-vars
+    document.getElementById('status').innerHTML = '<b>OpenCV.js is ready</b>. ' +
+        'You can upload an image.<br>' +
+        'The <b>imageSrc</b> is a &lt;img&gt; element used as cv.Mat input. ' +
+        'The <b>canvasOutput</b> is a &lt;canvas&gt; element used as cv.Mat output.';
+}
+
+function onOpenCvError() { // eslint-disable-line no-unused-vars
+ let element = document.getElementById('status');
+ element.setAttribute('class', 'err');
+ element.innerHTML = 'Failed to load opencv.js';
+}
+</script>
+<script async src="opencv.js" type="text/javascript" onload="onOpenCvReady();" onerror="onOpenCvError();"></script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Template Match Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Template Match Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>imageCanvasInput</b>, <b>templateCanvasInput</b>
+    and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="imageCanvasInput"></canvas>
+ <div class="caption">imageCanvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="templateCanvasInput"></canvas>
+ <div class="caption">templateCanvasInput <input type="file" id="templateFileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('imageCanvasInput');
+let templ = cv.imread('templateCanvasInput');
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+cv.matchTemplate(src, templ, dst, cv.TM_CCOEFF, mask);
+let result = cv.minMaxLoc(dst, mask);
+let maxPoint = result.maxLoc;
+let color = new cv.Scalar(255, 0, 0, 255);
+let point = new cv.Point(maxPoint.x + templ.cols, maxPoint.y + templ.rows);
+cv.rectangle(src, maxPoint, point, color, 2, cv.LINE_8, 0);
+cv.imshow('canvasOutput', src);
+src.delete(); dst.delete(); mask.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'imageCanvasInput');
+utils.loadImageToCanvas('lenaFace.png', 'templateCanvasInput');
+utils.addFileInputHandler('fileInput', 'imageCanvasInput');
+utils.addFileInputHandler('templateFileInput', 'templateCanvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Adaptive Threshold Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Adaptive Threshold Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+// You can try more different parameters
+cv.adaptiveThreshold(src, dst, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 3, 2);
+cv.imshow('canvasOutput', dst);
+src.delete();
+dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Threshold Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Threshold Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// You can try more different parameters
+cv.threshold(src, dst, 177, 200, cv.THRESH_BINARY);
+cv.imshow('canvasOutput', dst);
+src.delete();
+dst.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('lena.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Trackbar Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Trackbar Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput1</b>, <b>canvasInput2</b> and <b>canvasOutput</b> have been prepared.<br>
+    The code in the &lt;textarea&gt; will be executed when the value of the &lt;input&gt; element named <b>trackbar</b> changes.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<textarea class="code" rows="12" cols="80" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <b>trackbar</b>
+ <input type="range" id="trackbar" disabled value="50" min="0" max="100" step="1">
+ <label id="weightValue" ></label>
+ <div class="inputoutput">
+ <div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <canvas id="canvasInput1"></canvas>
+ </td>
+ <td>
+ <canvas id="canvasInput2"></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <p class="caption">canvasInput1</p>
+ </td>
+ <td>
+ <p class="caption">canvasInput2</p>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <canvas id="canvasOutput"></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+ </div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let trackbar = document.getElementById('trackbar');
+let alpha = trackbar.value/trackbar.max;
+let beta = ( 1.0 - alpha );
+let src1 = cv.imread('canvasInput1');
+let src2 = cv.imread('canvasInput2');
+let dst = new cv.Mat();
+cv.addWeighted( src1, alpha, src2, beta, 0.0, dst, -1);
+cv.imshow('canvasOutput', dst);
+dst.delete();
+src1.delete();
+src2.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('apple.jpg', 'canvasInput1');
+utils.loadImageToCanvas('orange.jpg', 'canvasInput2');
+
+let trackbar = document.getElementById('trackbar');
+trackbar.addEventListener('input', () => {
+ utils.executeCode('codeEditor');
+});
+
+let weightValue = document.getElementById('weightValue');
+weightValue.innerText = trackbar.value;
+trackbar.addEventListener('input', () => {
+ weightValue.innerText = trackbar.value;
+});
+
+utils.loadOpenCv(() => {
+ trackbar.removeAttribute('disabled');
+ utils.executeCode('codeEditor');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Video Capture Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Video Capture Example</h2>
+<p>
+    Click the <b>Start/Stop</b> button to start or stop the camera capture.<br>
+    The <b>videoInput</b> is a &lt;video&gt; element used as OpenCV.js input.
+    The <b>canvasOutput</b> is a &lt;canvas&gt; element used as OpenCV.js output.<br>
+    The code in the &lt;textarea&gt; will be executed when the video starts.
+    You can modify the code to investigate more.
+</p>
+<div>
+<div class="control"><button id="startAndStop" disabled>Start</button></div>
+<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+</div>
+<p class="err" id="errorMessage"></p>
+<div>
+ <table cellpadding="0" cellspacing="0" width="0" border="0">
+ <tr>
+ <td>
+ <video id="videoInput" width=320 height=240></video>
+ </td>
+ <td>
+ <canvas id="canvasOutput" width=320 height=240></canvas>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>
+ <div class="caption">videoInput</div>
+ </td>
+ <td>
+ <div class="caption">canvasOutput</div>
+ </td>
+ <td></td>
+ <td></td>
+ </tr>
+ </table>
+</div>
+<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let video = document.getElementById('videoInput');
+let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let dst = new cv.Mat(video.height, video.width, cv.CV_8UC1);
+let cap = new cv.VideoCapture(video);
+
+const FPS = 30;
+function processVideo() {
+ try {
+ if (!streaming) {
+ // clean and stop.
+ src.delete();
+ dst.delete();
+ return;
+ }
+ let begin = Date.now();
+ // start processing.
+ cap.read(src);
+ cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+ cv.imshow('canvasOutput', dst);
+ // schedule the next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+ } catch (err) {
+ utils.printError(err);
+ }
+};
+
+// schedule the first one.
+setTimeout(processVideo, 0);
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+
+let streaming = false;
+let videoInput = document.getElementById('videoInput');
+let startAndStop = document.getElementById('startAndStop');
+let canvasOutput = document.getElementById('canvasOutput');
+let canvasContext = canvasOutput.getContext('2d');
+
+startAndStop.addEventListener('click', () => {
+ if (!streaming) {
+ utils.clearError();
+ utils.startCamera('qvga', onVideoStarted, 'videoInput');
+ } else {
+ utils.stopCamera();
+ onVideoStopped();
+ }
+});
+
+function onVideoStarted() {
+ streaming = true;
+ startAndStop.innerText = 'Stop';
+ videoInput.width = videoInput.videoWidth;
+ videoInput.height = videoInput.videoHeight;
+ utils.executeCode('codeEditor');
+}
+
+function onVideoStopped() {
+ streaming = false;
+ canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
+ startAndStop.innerText = 'Start';
+}
+
+utils.loadOpenCv(() => {
+ startAndStop.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Background Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Background Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let gray = new cv.Mat();
+let opening = new cv.Mat();
+let coinsBg = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
+
+// get background
+let M = cv.Mat.ones(3, 3, cv.CV_8U);
+cv.erode(gray, gray, M);
+cv.dilate(gray, opening, M);
+cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
+
+cv.imshow('canvasOutput', coinsBg);
+src.delete(); dst.delete(); gray.delete(); opening.delete(); coinsBg.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Distance Transform Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Distance Transform Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let gray = new cv.Mat();
+let opening = new cv.Mat();
+let coinsBg = new cv.Mat();
+let coinsFg = new cv.Mat();
+let distTrans = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
+let M = cv.Mat.ones(3, 3, cv.CV_8U);
+cv.erode(gray, gray, M);
+cv.dilate(gray, opening, M);
+cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
+
+// distance transform
+cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
+cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
+
+cv.imshow('canvasOutput', distTrans);
+src.delete(); dst.delete(); gray.delete(); opening.delete();
+coinsBg.delete(); coinsFg.delete(); distTrans.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Foreground Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Foreground Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let gray = new cv.Mat();
+let opening = new cv.Mat();
+let coinsBg = new cv.Mat();
+let coinsFg = new cv.Mat();
+let distTrans = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
+let M = cv.Mat.ones(3, 3, cv.CV_8U);
+cv.erode(gray, gray, M);
+cv.dilate(gray, opening, M);
+cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
+cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
+cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
+
+// get foreground
+cv.threshold(distTrans, coinsFg, 0.7 * 1, 255, cv.THRESH_BINARY);
+
+cv.imshow('canvasOutput', coinsFg);
+src.delete(); dst.delete(); gray.delete(); opening.delete();
+coinsBg.delete(); coinsFg.delete(); distTrans.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Threshold Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Threshold Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let gray = new cv.Mat();
+
+// gray and threshold image
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
+
+cv.imshow('canvasOutput', gray);
+src.delete(); dst.delete(); gray.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Image Watershed Example</title>
+<link href="js_example_style.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<h2>Image Watershed Example</h2>
+<p>
+    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
+    Click the <b>Try it</b> button to see the result. You can choose another image.<br>
+    You can change the code in the &lt;textarea&gt; to investigate more.
+</p>
+<div>
+<div class="control"><button id="tryIt" disabled>Try it</button></div>
+<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
+</textarea>
+<p class="err" id="errorMessage"></p>
+</div>
+<div>
+ <div class="inputoutput">
+ <canvas id="canvasInput"></canvas>
+ <div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput"></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script src="utils.js" type="text/javascript"></script>
+<script id="codeSnippet" type="text/code-snippet">
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let gray = new cv.Mat();
+let opening = new cv.Mat();
+let coinsBg = new cv.Mat();
+let coinsFg = new cv.Mat();
+let distTrans = new cv.Mat();
+let unknown = new cv.Mat();
+let markers = new cv.Mat();
+// gray and threshold image
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
+// get background
+let M = cv.Mat.ones(3, 3, cv.CV_8U);
+cv.erode(gray, gray, M);
+cv.dilate(gray, opening, M);
+cv.dilate(opening, coinsBg, M, new cv.Point(-1, -1), 3);
+// distance transform
+cv.distanceTransform(opening, distTrans, cv.DIST_L2, 5);
+cv.normalize(distTrans, distTrans, 1, 0, cv.NORM_INF);
+// get foreground
+cv.threshold(distTrans, coinsFg, 0.7 * 1, 255, cv.THRESH_BINARY);
+coinsFg.convertTo(coinsFg, cv.CV_8U, 1, 0);
+cv.subtract(coinsBg, coinsFg, unknown);
+// get connected components markers
+cv.connectedComponents(coinsFg, markers);
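+// cv.watershed expects the sure background to be labeled > 0 and the unknown
+// region to be 0, so shift all labels up by one and zero out the unknown pixels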
+for (let i = 0; i < markers.rows; i++) {
+ for (let j = 0; j < markers.cols; j++) {
+ markers.intPtr(i, j)[0] = markers.ucharPtr(i, j)[0] + 1;
+ if (unknown.ucharPtr(i, j)[0] == 255) {
+ markers.intPtr(i, j)[0] = 0;
+ }
+ }
+}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
+cv.watershed(src, markers);
+// draw barriers
+for (let i = 0; i < markers.rows; i++) {
+ for (let j = 0; j < markers.cols; j++) {
+ if (markers.intPtr(i, j)[0] == -1) {
+ src.ucharPtr(i, j)[0] = 255; // R
+ src.ucharPtr(i, j)[1] = 0; // G
+ src.ucharPtr(i, j)[2] = 0; // B
+ }
+ }
+}
+cv.imshow('canvasOutput', src);
+src.delete(); dst.delete(); gray.delete(); opening.delete(); coinsBg.delete();
+coinsFg.delete(); distTrans.delete(); unknown.delete(); markers.delete(); M.delete();
+</script>
+<script type="text/javascript">
+let utils = new Utils('errorMessage');
+
+utils.loadCode('codeSnippet', 'codeEditor');
+utils.loadImageToCanvas('coins.jpg', 'canvasInput');
+utils.addFileInputHandler('fileInput', 'canvasInput');
+
+let tryIt = document.getElementById('tryIt');
+tryIt.addEventListener('click', () => {
+ utils.executeCode('codeEditor');
+});
+
+utils.loadOpenCv(() => {
+ tryIt.removeAttribute('disabled');
+});
+</script>
+</body>
+</html>
--- /dev/null
+function Utils(errorOutputId) { // eslint-disable-line no-unused-vars
+ let self = this;
+ this.errorOutput = document.getElementById(errorOutputId);
+
+ const OPENCV_URL = 'opencv.js';
+ this.loadOpenCv = function(onloadCallback) {
+ let script = document.createElement('script');
+ script.setAttribute('async', '');
+ script.setAttribute('type', 'text/javascript');
+ script.addEventListener('load', () => {
+ console.log(cv.getBuildInformation());
+ onloadCallback();
+ });
+ script.addEventListener('error', () => {
+ this.printError('Failed to load ' + OPENCV_URL);
+ });
+ script.src = OPENCV_URL;
+ let node = document.getElementsByTagName('script')[0];
+ node.parentNode.insertBefore(script, node);
+ };
+
+    this.loadImageToCanvas = function(url, canvasId) {
+        let canvas = document.getElementById(canvasId);
+ let ctx = canvas.getContext('2d');
+ let img = new Image();
+ img.crossOrigin = 'anonymous';
+ img.onload = function() {
+ canvas.width = img.width;
+ canvas.height = img.height;
+ ctx.drawImage(img, 0, 0, img.width, img.height);
+ };
+ img.src = url;
+ };
+
+ this.executeCode = function(textAreaId) {
+ try {
+ this.clearError();
+ let code = document.getElementById(textAreaId).value;
+ eval(code);
+ } catch (err) {
+ this.printError(err);
+ }
+ };
+
+ this.clearError = function() {
+ this.errorOutput.innerHTML = '';
+ };
+
+ this.printError = function(err) {
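+        // OpenCV.js (Emscripten) can throw a raw exception pointer (a number)
+        // or a '<pointer> <message>' string; decode those via cv.exceptionFromPtr.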
+ if (typeof err === 'undefined') {
+ err = '';
+ } else if (typeof err === 'number') {
+ if (!isNaN(err)) {
+ if (typeof cv !== 'undefined') {
+ err = 'Exception: ' + cv.exceptionFromPtr(err).msg;
+ }
+ }
+ } else if (typeof err === 'string') {
+ let ptr = Number(err.split(' ')[0]);
+ if (!isNaN(ptr)) {
+ if (typeof cv !== 'undefined') {
+ err = 'Exception: ' + cv.exceptionFromPtr(ptr).msg;
+ }
+ }
+ } else if (err instanceof Error) {
+ err = err.stack.replace(/\n/g, '<br>');
+ }
+ this.errorOutput.innerHTML = err;
+ };
+
+ this.loadCode = function(scriptId, textAreaId) {
+ let scriptNode = document.getElementById(scriptId);
+ let textArea = document.getElementById(textAreaId);
+ if (scriptNode.type !== 'text/code-snippet') {
+ throw Error('Unknown code snippet type');
+ }
+ textArea.value = scriptNode.text.replace(/^\n/, '');
+ };
+
+ this.addFileInputHandler = function(fileInputId, canvasId) {
+ let inputElement = document.getElementById(fileInputId);
+ inputElement.addEventListener('change', (e) => {
+ let imgUrl = URL.createObjectURL(e.target.files[0]);
+            self.loadImageToCanvas(imgUrl, canvasId);
+ }, false);
+ };
+
+ function onVideoCanPlay() {
+ if (self.onCameraStartedCallback) {
+ self.onCameraStartedCallback(self.stream, self.video);
+ }
+ };
+
+ this.startCamera = function(resolution, callback, videoId) {
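+        // map resolution presets to getUserMedia constraints; unknown presets
+        // fall back to the browser default below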
+ const constraints = {
+ 'qvga': {width: {exact: 320}, height: {exact: 240}},
+ 'vga': {width: {exact: 640}, height: {exact: 480}}};
+ let video = document.getElementById(videoId);
+ if (!video) {
+ video = document.createElement('video');
+ }
+
+ let videoConstraint = constraints[resolution];
+ if (!videoConstraint) {
+ videoConstraint = true;
+ }
+
+ navigator.mediaDevices.getUserMedia({video: videoConstraint, audio: false})
+ .then(function(stream) {
+ video.srcObject = stream;
+ video.play();
+ self.video = video;
+ self.stream = stream;
+ self.onCameraStartedCallback = callback;
+ video.addEventListener('canplay', onVideoCanPlay, false);
+ })
+ .catch(function(err) {
+ self.printError('Camera Error: ' + err.name + ' ' + err.message);
+ });
+ };
+
+ this.stopCamera = function() {
+ if (this.video) {
+ this.video.pause();
+ this.video.srcObject = null;
+ this.video.removeEventListener('canplay', onVideoCanPlay);
+ }
+ if (this.stream) {
+ this.stream.getVideoTracks()[0].stop();
+ }
+ };
+};
--- /dev/null
+Basic Operations on Images {#tutorial_js_basic_ops}
+==========================
+
+Goal
+----
+
+- Learn how to access image properties
+- Learn how to construct Mat
+- Learn how to copy Mat
+- Learn how to convert the type of Mat
+- Learn how to use MatVector
+- Learn how to access pixel values and modify them
+- Learn how to set Region of Interest (ROI)
+- Learn how to split and merge images
+
+Accessing Image Properties
+--------------------------
+
+Image properties include the number of rows and columns, the size, the depth, the number of channels, and the type of the image data.
+
+@code{.js}
+let src = cv.imread("canvasInput");
+console.log('image width: ' + src.cols + '\n' +
+            'image height: ' + src.rows + '\n' +
+            'image size: ' + src.size().width + '*' + src.size().height + '\n' +
+            'image depth: ' + src.depth() + '\n' +
+            'image channels: ' + src.channels() + '\n' +
+            'image type: ' + src.type() + '\n');
+@endcode
+
+@note src.type() is very important while debugging because a large number of errors in OpenCV.js
+code are caused by an invalid data type.
+
+How to construct Mat
+--------------------
+
+There are 4 basic constructors:
+
+@code{.js}
+// 1. default constructor
+let mat = new cv.Mat();
+// 2. two-dimensional arrays by size and type
+let mat = new cv.Mat(size, type);
+// 3. two-dimensional arrays by rows, cols, and type
+let mat = new cv.Mat(rows, cols, type);
+// 4. two-dimensional arrays by rows, cols, and type with initialization value
+let mat = new cv.Mat(rows, cols, type, new cv.Scalar());
+@endcode
+
+There are 3 static functions:
+
+@code{.js}
+// 1. Create a Mat which is full of zeros
+let mat = cv.Mat.zeros(rows, cols, type);
+// 2. Create a Mat which is full of ones
+let mat = cv.Mat.ones(rows, cols, type);
+// 3. Create a Mat which is an identity matrix
+let mat = cv.Mat.eye(rows, cols, type);
+@endcode
+
+There are 2 factory functions:
+@code{.js}
+// 1. Use JS array to construct a mat.
+// For example: let mat = cv.matFromArray(2, 2, cv.CV_8UC1, [1, 2, 3, 4]);
+let mat = cv.matFromArray(rows, cols, type, array);
+// 2. Use imgData to construct a mat
+let ctx = canvas.getContext("2d");
+let imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+let mat = cv.matFromImageData(imgData);
+@endcode
+
+@note Don't forget to delete cv.Mat when you don't want to use it any more.
+
+How to copy Mat
+---------------
+
+There are 2 ways to copy a Mat:
+
+@code{.js}
+// 1. Clone
+let dst = src.clone();
+// 2. copyTo (only entries indicated in the mask are copied)
+src.copyTo(dst, mask);
+@endcode
+
+How to convert the type of Mat
+------------------------------
+
+We use the function: **convertTo(m, rtype, alpha = 1, beta = 0)**
+@param m output matrix; if it does not have a proper size or type before the operation, it is reallocated.
+@param rtype desired output matrix type or, rather, the depth, since the number of channels is the same as in the input; if rtype is negative, the output matrix will have the same type as the input.
+@param alpha optional scale factor.
+@param beta optional delta added to the scaled values.
+
+@code{.js}
+src.convertTo(dst, rtype);
+@endcode
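+
+For example, a minimal sketch (the canvas id, scale and shift values are assumptions) that maps an
+8-bit image with values in [0, 255] to a 32-bit float image with values in [0, 1]:
+@code{.js}
+let src = cv.imread("canvasInput");
+let dst = new cv.Mat();
+// rtype = cv.CV_32F, alpha = 1/255 (scale), beta = 0 (shift)
+src.convertTo(dst, cv.CV_32F, 1 / 255, 0);
+src.delete(); dst.delete();
+@endcode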
+
+How to use MatVector
+--------------------
+
+@code{.js}
+let mat = new cv.Mat();
+// Initialise a MatVector
+let matVec = new cv.MatVector();
+// Push a Mat back into MatVector
+matVec.push_back(mat);
+// Get a Mat from the MatVector
+let cnt = matVec.get(0);
+mat.delete(); matVec.delete(); cnt.delete();
+@endcode
+
+@note Don't forget to delete cv.Mat, cv.MatVector and cnt (the Mat you get from the MatVector) when you don't want to use them any more.
+
+Accessing and Modifying pixel values
+------------------------------------
+
+Firstly, you should know the following type relationship:
+
+Data Properties | C++ Type | JavaScript Typed Array | Mat Type
+--------------- | -------- | ---------------------- | --------
+data | uchar | Uint8Array | CV_8U
+data8S | char | Int8Array | CV_8S
+data16U | ushort | Uint16Array | CV_16U
+data16S | short | Int16Array | CV_16S
+data32S | int | Int32Array | CV_32S
+data32F | float | Float32Array | CV_32F
+data64F | double | Float64Array | CV_64F
+
+**1. data**
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+if (src.isContinuous()) {
+ let R = src.data[row * src.cols * src.channels() + col * src.channels()];
+ let G = src.data[row * src.cols * src.channels() + col * src.channels() + 1];
+ let B = src.data[row * src.cols * src.channels() + col * src.channels() + 2];
+ let A = src.data[row * src.cols * src.channels() + col * src.channels() + 3];
+}
+@endcode
+
+@note Data manipulation is only valid for continuous Mat. You should use isContinuous() to check first.
+
+**2. at**
+
+Mat Type | At Manipulation
+--------- | ---------------
+CV_8U | ucharAt
+CV_8S | charAt
+CV_16U | ushortAt
+CV_16S | shortAt
+CV_32S | intAt
+CV_32F | floatAt
+CV_64F | doubleAt
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+let R = src.ucharAt(row, col * src.channels());
+let G = src.ucharAt(row, col * src.channels() + 1);
+let B = src.ucharAt(row, col * src.channels() + 2);
+let A = src.ucharAt(row, col * src.channels() + 3);
+@endcode
+
+@note At manipulation is only for single channel access and the value can't be modified.
+
+**3. ptr**
+
+Mat Type | Ptr Manipulation | JavaScript Typed Array
+-------- | --------------- | ----------------------
+CV_8U | ucharPtr | Uint8Array
+CV_8S | charPtr | Int8Array
+CV_16U | ushortPtr | Uint16Array
+CV_16S | shortPtr | Int16Array
+CV_32S | intPtr | Int32Array
+CV_32F | floatPtr | Float32Array
+CV_64F | doublePtr | Float64Array
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+let pixel = src.ucharPtr(row, col);
+let R = pixel[0];
+let G = pixel[1];
+let B = pixel[2];
+let A = pixel[3];
+@endcode
+
+mat.ucharPtr(k) gets the k-th row of the mat. mat.ucharPtr(i, j) gets the element at the i-th row and the j-th column of the mat.
+
+Image ROI
+---------
+
+Sometimes you will have to play with a certain region of an image. For eye detection in images,
+face detection is done first over the whole image, and when a face is found, we select the face
+region alone and search for eyes inside it instead of searching the whole image. This improves
+accuracy (because eyes are always on faces) and performance (because we search in a small area).
+
+We use the function: **roi (rect)**
+@param rect rectangle Region of Interest.
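+
+A minimal sketch (the canvas ids and the rectangle values are assumptions):
+@code{.js}
+let src = cv.imread("canvasInput");
+// take a view onto the 100x100 region whose top-left corner is at (50, 50)
+let rect = new cv.Rect(50, 50, 100, 100);
+let dst = src.roi(rect);
+cv.imshow("canvasOutput", dst);
+src.delete(); dst.delete();
+@endcode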
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_basic_ops_roi.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+
+Splitting and Merging Image Channels
+------------------------------------
+
+Sometimes you will need to work separately on the R, G and B channels of an image. Then you need to
+split the RGB image into single planes. At other times, you may need to join these individual
+channels back into an RGB image.
+
+@code{.js}
+let src = cv.imread("canvasInput");
+let rgbaPlanes = new cv.MatVector();
+// Split the Mat
+cv.split(src, rgbaPlanes);
+// Get R channel
+let R = rgbaPlanes.get(0);
+// Merge all channels
+cv.merge(rgbaPlanes, src);
+src.delete(); rgbaPlanes.delete(); R.delete();
+@endcode
+
+@note Don't forget to delete cv.Mat, cv.MatVector and R (the Mat you get from the MatVector) when you don't want to use them any more.
+
+Making Borders for Images (Padding)
+-----------------------------------
+
+If you want to create a border around an image, something like a photo frame, you can use the
+**cv.copyMakeBorder()** function. It also has applications in convolution, zero padding, etc.
+This function takes the following arguments (a sketch follows the list):
+
+- **src** - input image
+- **top**, **bottom**, **left**, **right** - border width in number of pixels in corresponding
+ directions
+
+- **borderType** - Flag defining what kind of border to be added. It can be following types:
+ - **cv.BORDER_CONSTANT** - Adds a constant colored border. The value should be given
+ as next argument.
+ - **cv.BORDER_REFLECT** - Border will be mirror reflection of the border elements,
+ like this : *fedcba|abcdefgh|hgfedcb*
+ - **cv.BORDER_REFLECT_101** or **cv.BORDER_DEFAULT** - Same as above, but with a
+ slight change, like this : *gfedcb|abcdefgh|gfedcba*
+ - **cv.BORDER_REPLICATE** - Last element is replicated throughout, like this:
+ *aaaaaa|abcdefgh|hhhhhhh*
+ - **cv.BORDER_WRAP** - Can't explain, it will look like this :
+ *cdefgh|abcdefgh|abcdefg*
+
+- **value** - Color of border if border type is cv.BORDER_CONSTANT
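+
+A minimal sketch under assumed parameter values (a 10-pixel constant red border on all sides):
+@code{.js}
+let src = cv.imread("canvasInput");
+let dst = new cv.Mat();
+let red = new cv.Scalar(255, 0, 0, 255);
+cv.copyMakeBorder(src, dst, 10, 10, 10, 10, cv.BORDER_CONSTANT, red);
+cv.imshow("canvasOutput", dst);
+src.delete(); dst.delete();
+@endcode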
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_basic_ops_copymakeborder.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Arithmetic Operations on Images {#tutorial_js_image_arithmetics}
+===============================
+
+Goal
+----
+
+- Learn several arithmetic operations on images like addition, subtraction, bitwise operations
+ etc.
+- You will learn these functions : **cv.add()**, **cv.subtract()** etc.
+
+Image Addition
+--------------
+
+You can add two images with the OpenCV function cv.add(): res = img1 + img2. Both images should be of the same depth and type.
+
+For example, consider below sample:
+@code{.js}
+let src1 = cv.imread("canvasInput1");
+let src2 = cv.imread("canvasInput2");
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+let dtype = -1;
+cv.add(src1, src2, dst, mask, dtype);
+src1.delete(); src2.delete(); dst.delete(); mask.delete();
+@endcode
+
+Image Subtraction
+--------------
+
+You can subtract two images with the OpenCV function cv.subtract(): res = img1 - img2. Both images should be of the same depth and type.
+
+For example, consider below sample:
+@code{.js}
+let src1 = cv.imread("canvasInput1");
+let src2 = cv.imread("canvasInput2");
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+let dtype = -1;
+cv.subtract(src1, src2, dst, mask, dtype);
+src1.delete(); src2.delete(); dst.delete(); mask.delete();
+@endcode
+
+Bitwise Operations
+------------------
+
+This includes the bitwise AND, OR, NOT and XOR operations. They are highly useful when extracting
+any part of an image, or when defining and working with a non-rectangular
+ROI. Below we will see an example of how to change a particular region of an image.
+
+I want to put the OpenCV logo above an image. If I add the two images, the colors will change. If I
+blend them, I get a transparent effect. But I want it to be opaque. If it were a rectangular region,
+I could use ROI as we did in the last chapter. But the OpenCV logo is not a rectangular shape, so
+it can be done with bitwise operations, as the sketch below shows.
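+
+A minimal sketch, assuming the logo is on "canvasInput2" and fits inside the image on
+"canvasInput1" (the threshold value is also an assumption):
+@code{.js}
+let img1 = cv.imread("canvasInput1");
+let img2 = cv.imread("canvasInput2");
+let gray = new cv.Mat(), mask = new cv.Mat(), maskInv = new cv.Mat();
+// build a mask of the logo and its inverse
+cv.cvtColor(img2, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, mask, 100, 255, cv.THRESH_BINARY);
+cv.bitwise_not(mask, maskInv);
+// black out the logo area in the top-left ROI of img1 and keep only the logo from img2
+let roi = img1.roi(new cv.Rect(0, 0, img2.cols, img2.rows));
+let bg = new cv.Mat(), fg = new cv.Mat(), sum = new cv.Mat();
+cv.bitwise_and(roi, roi, bg, maskInv);
+cv.bitwise_and(img2, img2, fg, mask);
+cv.add(bg, fg, sum);
+sum.copyTo(roi);
+cv.imshow("canvasOutput", img1);
+img1.delete(); img2.delete(); gray.delete(); mask.delete(); maskInv.delete();
+roi.delete(); bg.delete(); fg.delete(); sum.delete();
+@endcode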
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_image_arithmetics_bitwise.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Some Data Structures {#tutorial_js_some_data_structures}
+===============================
+
+Goal
+----
+
+- You will learn some data structures : **Point**, **Scalar**, **Size**, **Circle**, **Rect**, **RotatedRect** etc.
+
+Scalar is an array type in JavaScript. Point, Size, Circle, Rect and RotatedRect are object types in JavaScript.
+
+Point
+--------------
+
+There are 2 ways to construct a Point and they are equivalent:
+@code{.js}
+// The first way
+let point = new cv.Point(x, y);
+// The second way
+let point = {x: x, y: y};
+@endcode
+
+@param x x coordinate of the point (the origin is the top-left corner of the image).
+@param y y coordinate of the point.
+
+Scalar
+--------------
+
+There are 2 ways to construct a Scalar and they are equivalent:
+@code{.js}
+// The first way
+let scalar = new cv.Scalar(R, G, B, Alpha);
+// The second way
+let scalar = [R, G, B, Alpha];
+@endcode
+
+@param R pixel value of red channel.
+@param G pixel value of green channel.
+@param B pixel value of blue channel.
+@param Alpha pixel value of alpha channel.
+
+Size
+------------------
+
+There are 2 ways to construct a Size and they are equivalent:
+@code{.js}
+// The first way
+let size = new cv.Size(width, height);
+// The second way
+let size = {width : width, height : height};
+@endcode
+
+@param width the width of the size.
+@param height the height of the size.
+
+Circle
+------------------
+
+There are 2 ways to construct a Circle and they are equivalent:
+@code{.js}
+// The first way
+let circle = new cv.Circle(center, radius);
+// The second way
+let circle = {center : center, radius : radius};
+@endcode
+
+@param center the center of the circle.
+@param radius the radius of the circle.
+
+Rect
+------------------
+
+There are 2 ways to construct a Rect and they are equivalent:
+@code{.js}
+// The first way
+let rect = new cv.Rect(x, y, width, height);
+// The second way
+let rect = {x : x, y : y, width : width, height : height};
+@endcode
+
+@param x x coordinate of the vertex which is the top left corner of the rectangle.
+@param y y coordinate of the vertex which is the top left corner of the rectangle.
+@param width the width of the rectangle.
+@param height the height of the rectangle.
+
+RotatedRect
+------------------
+
+There are 2 ways to construct a RotatedRect and they are equivalent:
+@code{.js}
+// The first way
+let rotatedRect = new cv.RotatedRect(center, size, angle);
+// The second way
+let rotatedRect = {center : center, size : size, angle : angle};
+@endcode
+
+@param center the rectangle mass center.
+@param size width and height of the rectangle.
+@param angle the rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+
+Learn how to get the vertices from rotatedRect:
+
+We use the function: **cv.RotatedRect.points(rotatedRect)**
+@param rotatedRect rotated rectangle
+
+@code{.js}
+let vertices = cv.RotatedRect.points(rotatedRect);
+let point1 = vertices[0];
+let point2 = vertices[1];
+let point3 = vertices[2];
+let point4 = vertices[3];
+@endcode
+
+Learn how to get the bounding rectangle from rotatedRect:
+
+We use the function: **cv.RotatedRect.boundingRect(rotatedRect)**
+@param rotatedRect rotated rectangle
+
+@code{.js}
+let boundingRect = cv.RotatedRect.boundingRect(rotatedRect);
+@endcode
\ No newline at end of file
--- /dev/null
+Core Operations {#tutorial_js_table_of_contents_core}
+===============
+
+- @subpage tutorial_js_basic_ops
+
+ Learn to read and
+ edit pixel values, working with image ROI and other basic operations.
+
+- @subpage tutorial_js_image_arithmetics
+
+ Perform arithmetic
+ operations on images
+
+- @subpage tutorial_js_some_data_structures
+
+ Learn some data structures
--- /dev/null
+Getting Started with Images {#tutorial_js_image_display}
+===========================
+
+Goals
+-----
+
+- Learn how to read an image and how to display it in a web page.
+
+Read an image
+-------------
+
+OpenCV.js stores images as the cv.Mat type. We use the HTML canvas element to transfer a cv.Mat
+to the web page and back. The ImageData interface can represent or set the underlying pixel data
+of an area of a canvas element.
+
+@sa Please refer to canvas docs for more details.
+
+First, create an ImageData obj from canvas:
+@code{.js}
+let canvas = document.getElementById(canvasInputId);
+let ctx = canvas.getContext('2d');
+let imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+@endcode
+
+Then, use cv.matFromImageData to construct a cv.Mat:
+@code{.js}
+let src = cv.matFromImageData(imgData);
+@endcode
+
+@note Because the canvas only supports 8-bit RGBA images with continuous storage, the cv.Mat type is cv.CV_8UC4.
+This is different from native OpenCV, where images returned and shown by the native **imread** and
+**imshow** have the channels stored in BGR order.
+
+Display an image
+----------------
+
+First, convert the type of src to cv.CV_8UC4:
+@code{.js}
+let dst = new cv.Mat();
+// scale and shift are used to map the data to [0, 255].
+src.convertTo(dst, cv.CV_8U, scale, shift);
+// *** is GRAY, RGB, or RGBA, depending on whether src.channels() is 1, 3 or 4.
+cv.cvtColor(dst, dst, cv.COLOR_***2RGBA);
+@endcode
+
+Then, construct an ImageData object from dst:
+@code{.js}
+let imgData = new ImageData(new Uint8ClampedArray(dst.data), dst.cols, dst.rows);
+@endcode
+
+Finally, display it:
+@code{.js}
+let canvas = document.getElementById(canvasOutputId);
+let ctx = canvas.getContext('2d');
+ctx.clearRect(0, 0, canvas.width, canvas.height);
+canvas.width = imgData.width;
+canvas.height = imgData.height;
+ctx.putImageData(imgData, 0, 0);
+@endcode
+
+In OpenCV.js
+------------
+
+OpenCV.js implements image reading and showing using the above method.
+
+We use **cv.imread (imageSource)** to read an image from html canvas or img element.
+@param imageSource canvas element or id, or img element or id.
+@return mat with channels stored in RGBA order.
+
+We use **cv.imshow (canvasSource, mat)** to display it. The function may scale the mat,
+depending on its depth:
+- If the mat is 8-bit unsigned, it is displayed as is.
+- If the mat is 16-bit unsigned or 32-bit integer, the pixels are divided by 256. That
+is, the value range [0,255*256] is mapped to [0,255].
+- If the mat is 32-bit floating-point, the pixel values are multiplied by 255. That is,
+the value range [0,1] is mapped to [0,255].
+
+@param canvasSource canvas element or id.
+@param mat mat to be shown.
+
+The above code of image reading and showing could be simplified as below.
+@code{.js}
+let img = cv.imread(imageSource);
+cv.imshow(canvasOutput, img);
+img.delete();
+@endcode
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_image_display.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
--- /dev/null
+GUI Features {#tutorial_js_table_of_contents_gui}
+============
+
+- @subpage tutorial_js_image_display
+
+    Learn to load an image and display it in a web page
+
+- @subpage tutorial_js_video_display
+
+ Learn to capture video from Camera and play it
+
+- @subpage tutorial_js_trackbar
+
+ Create trackbar to control certain parameters
--- /dev/null
+Add a Trackbar to Your Application {#tutorial_js_trackbar}
+==================================
+
+Goal
+----
+
+- Use HTML DOM Input Range Object to add a trackbar to your application.
+
+Code Demo
+---------
+
+Here, we will create a simple application that blends two images. We will let the user enter the
+weight by using the trackbar.
+
+First, we need to create three canvas elements: two for input and one for output. Please refer to
+the tutorial @ref tutorial_js_image_display.
+@code{.js}
+let src1 = cv.imread('canvasInput1');
+let src2 = cv.imread('canvasInput2');
+@endcode
+
+Then, we use the HTML DOM Input Range Object to implement the trackbar, as shown below.
+![](images/Trackbar_Tutorial_Range.png)
+
+@note <input> elements with type="range" are not supported in Internet Explorer 9 and earlier versions.
+
+You can create an <input> element with type="range" with the document.createElement() method:
+@code{.js}
+let x = document.createElement('INPUT');
+x.setAttribute('type', 'range');
+@endcode
+
+You can access an <input> element with type="range" with getElementById():
+@code{.js}
+let x = document.getElementById('myRange');
+@endcode
+
+As a trackbar, the range element needs a trackbar name, a default value, a minimum value, a maximum value,
+a step, and a callback function which is executed every time the trackbar value changes. The callback
+function always has a default argument, which is the trackbar position. Additionally, a text element can
+be used to display the trackbar value. In our case, we can create the trackbar as below:
+@code{.html}
+Weight: <input type="range" id="trackbar" value="50" min="0" max="100" step="1" oninput="callback()">
+<input type="text" id="weightValue" size="3" value="50"/>
+@endcode
+
+Finally, we can use the trackbar value in the callback function, blend the two images, and display the result.
+@code{.js}
+let weightValue = document.getElementById('weightValue');
+let trackbar = document.getElementById('trackbar');
+weightValue.setAttribute('value', trackbar.value);
+let alpha = trackbar.value/trackbar.max;
+let beta = ( 1.0 - alpha );
+let src1 = cv.imread('canvasInput1');
+let src2 = cv.imread('canvasInput2');
+let dst = new cv.Mat();
+cv.addWeighted( src1, alpha, src2, beta, 0.0, dst, -1);
+cv.imshow('canvasOutput', dst);
+dst.delete();
+src1.delete();
+src2.delete();
+@endcode
+
+@sa cv.addWeighted
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_trackbar.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
--- /dev/null
+Getting Started with Videos {#tutorial_js_video_display}
+===========================
+
+Goal
+----
+
+- Learn to capture video from a camera and display it.
+
+Capture video from camera
+-------------------------
+
+Often, we have to capture a live stream with a camera. In OpenCV.js, we use [WebRTC](https://webrtc.org/)
+and the HTML canvas element to implement this. Let's capture a video from the camera (built-in
+or USB), convert it into a grayscale video and display it.
+
+To capture a video, you need to add some HTML elements to the web page:
+- a <video> to display video from camera directly
+- a <canvas> to transfer video to canvas ImageData frame-by-frame
+- another <canvas> to display the video OpenCV.js gets
+
+First, we use WebRTC navigator.mediaDevices.getUserMedia to get the media stream.
+@code{.js}
+let video = document.getElementById("videoInput"); // video is the id of video tag
+navigator.mediaDevices.getUserMedia({ video: true, audio: false })
+ .then(function(stream) {
+ video.srcObject = stream;
+ video.play();
+ })
+ .catch(function(err) {
+        console.log("An error occurred! " + err);
+ });
+@endcode
+
+@note This function is unnecessary when you capture video from a video file. But notice that the
+HTML video element only supports the Ogg (Theora), WebM (VP8/VP9) and MP4 (H.264) video formats.
+
+Playing video
+-------------
+Now the browser gets the camera stream. Then, we use the CanvasRenderingContext2D.drawImage() method
+of the Canvas 2D API to draw the video onto the canvas. Finally, we can use the method in
+@ref tutorial_js_image_display to read and display the image in the canvas. To play video,
+cv.imshow() should be executed every *delay* milliseconds. We recommend the setTimeout() method:
+if the video is 30fps, the *delay* should be (1000/30 - processing_time) milliseconds.
+@code{.js}
+let canvasFrame = document.getElementById("canvasFrame"); // canvasFrame is the id of <canvas>
+let context = canvasFrame.getContext("2d");
+let src = new cv.Mat(height, width, cv.CV_8UC4);
+let dst = new cv.Mat(height, width, cv.CV_8UC1);
+
+const FPS = 30;
+function processVideo() {
+ let begin = Date.now();
+ context.drawImage(video, 0, 0, width, height);
+ src.data.set(context.getImageData(0, 0, width, height).data);
+ cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+ cv.imshow("canvasOutput", dst); // canvasOutput is the id of another <canvas>;
+ // schedule next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+}
+
+// schedule first one.
+setTimeout(processVideo, 0);
+@endcode
+
+OpenCV.js implements **cv.VideoCapture (videoSource)** using the above method. You do not need to
+add the hidden canvas element manually.
+@param videoSource the video id or element.
+@return cv.VideoCapture instance
+
+We use **read (image)** to get one frame of the video. For performance reasons, the image should be
+constructed with the cv.CV_8UC4 type and the same size as the video.
+@param image image with cv.CV_8UC4 type and same size as the video.
+
+The above code of playing video could be simplified as below.
+@code{.js}
+let src = new cv.Mat(height, width, cv.CV_8UC4);
+let dst = new cv.Mat(height, width, cv.CV_8UC1);
+let cap = new cv.VideoCapture(videoSource);
+
+const FPS = 30;
+function processVideo() {
+ let begin = Date.now();
+ cap.read(src);
+ cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+ cv.imshow("canvasOutput", dst);
+ // schedule next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+}
+
+// schedule first one.
+setTimeout(processVideo, 0);
+@endcode
+
+@note Remember to delete src and dst when the video stops.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_video_display.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Canny Edge Detection {#tutorial_js_canny}
+====================
+
+Goal
+----
+
+- Concept of Canny edge detection
+- OpenCV functions for that : **cv.Canny()**
+
+Theory
+------
+
+Canny Edge Detection is a popular edge detection algorithm. It was developed by John F. Canny in 1986. It is a multi-stage algorithm, and we will go through each stage.
+
+-# **Noise Reduction**
+
+ Since edge detection is susceptible to noise in the image, the first step is to remove the noise in the
+ image with a 5x5 Gaussian filter. We have already seen this in previous chapters.
+
+-# **Finding Intensity Gradient of the Image**
+
+ The smoothed image is then filtered with a Sobel kernel in both the horizontal and vertical directions to
+ get the first derivatives in the horizontal direction (\f$G_x\f$) and the vertical direction (\f$G_y\f$). From these two
+ images, we can find the edge gradient and direction for each pixel as follows:
+
+ \f[
+ Edge\_Gradient \; (G) = \sqrt{G_x^2 + G_y^2} \\
+ Angle \; (\theta) = \tan^{-1} \bigg(\frac{G_y}{G_x}\bigg)
+ \f]
+
+ Gradient direction is always perpendicular to edges. It is rounded to one of four angles
+ representing vertical, horizontal and two diagonal directions.
+
+-# **Non-maximum Suppression**
+
+ After getting the gradient magnitude and direction, a full scan of the image is done to remove any unwanted
+ pixels which may not constitute an edge. For this, every pixel is checked to see if it is a
+ local maximum in its neighborhood in the direction of the gradient. Check the image below:
+
+ ![image](images/nms.jpg)
+
+ Point A is on the edge (in the vertical direction). The gradient direction is normal to the edge. Points B
+ and C are in the gradient direction. So point A is checked against points B and C to see if it forms a
+ local maximum. If so, it is considered for the next stage; otherwise, it is suppressed (put to zero).
+
+ In short, the result you get is a binary image with "thin edges".
+
+-# **Hysteresis Thresholding**
+
+ This stage decides which edges are really edges and which are not. For this, we need two
+ threshold values, minVal and maxVal. Any edges with an intensity gradient above maxVal are sure to
+ be edges, and those below minVal are sure to be non-edges, so they are discarded. Those which lie between these
+ two thresholds are classified as edges or non-edges based on their connectivity. If they are connected
+ to "sure-edge" pixels, they are considered to be part of edges. Otherwise, they are also discarded.
+ See the image below:
+
+ ![image](images/hysteresis.jpg)
+
+ The edge A is above maxVal, so it is considered a "sure-edge". Although edge C is below maxVal, it is
+ connected to edge A, so it is also considered a valid edge and we get the full curve. But edge B,
+ although it is above minVal and in the same region as edge C, is not connected to any
+ "sure-edge", so it is discarded. It is therefore very important to select minVal and maxVal
+ accordingly to get the correct result.
+
+ This stage also removes small pixel noise on the assumption that edges are long lines.
+
+So what we finally get are the strong edges in the image.
+
+Canny Edge Detection in OpenCV
+------------------------------
+
+We use the function: **cv.Canny(image, edges, threshold1, threshold2, apertureSize = 3, L2gradient = false)**
+@param image 8-bit input image.
+@param edges output edge map; single-channel 8-bit image, which has the same size as image.
+@param threshold1 first threshold for the hysteresis procedure.
+@param threshold2 second threshold for the hysteresis procedure.
+@param apertureSize aperture size for the Sobel operator.
+@param L2gradient specifies the equation for finding the gradient
+magnitude. If it is true, the more accurate equation mentioned above is used; otherwise this equation is used: \f$Edge\_Gradient \; (G) = |G_x| + |G_y|\f$.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_canny.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Changing Colorspaces {#tutorial_js_colorspaces}
+====================
+
+Goal
+----
+
+- In this tutorial, you will learn how to convert images from one color-space to another, like
+ RGB \f$\leftrightarrow\f$ Gray, RGB \f$\leftrightarrow\f$ HSV etc.
+- You will learn following functions : **cv.cvtColor()**, **cv.inRange()** etc.
+
+cvtColor
+--------------------
+
+There are more than 150 color-space conversion methods available in OpenCV. But we will look into
+the most widely used one: RGB \f$\leftrightarrow\f$ Gray.
+
+We use the function: **cv.cvtColor (src, dst, code, dstCn = 0)**
+@param src input image.
+@param dst output image of the same size and depth as src
+@param code color space conversion code(see **cv.ColorConversionCodes**).
+@param dstCn number of channels in the destination image; if the parameter is 0, the number of the channels is derived automatically from src and code.
+
+For RGB \f$\rightarrow\f$ Gray conversion we use the code cv.COLOR_RGBA2GRAY.
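+
+For example, a minimal sketch, assuming src is an RGBA cv.Mat loaded from a canvas:
+@code{.js}
+let dst = new cv.Mat();
+cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY, 0);
+@endcode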
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_colorspaces_cvtColor.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+inRange
+---------------
+
+Checks if array elements lie between the elements of two other arrays.
+
+We use the function: **cv.inRange (src, lowerb, upperb, dst)**
+@param src first input image.
+@param lowerb inclusive lower boundary Mat of the same size as src.
+@param upperb inclusive upper boundary Mat of the same size as src.
+@param dst output image of the same size as src and cv.CV_8U type.
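+
+A minimal sketch, assuming src is an RGBA cv.Mat; the boundary Mats are filled with constant per-channel values:
+@code{.js}
+let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
+let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
+let mask = new cv.Mat();
+cv.inRange(src, low, high, mask); // mask is 255 where src lies within [low, high]
+@endcode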
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_colorspaces_inRange.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Contour Features {#tutorial_js_contour_features}
+================
+
+Goal
+----
+
+- To find the different features of contours, like area, perimeter, centroid, bounding box etc
+- You will learn plenty of functions related to contours.
+
+1. Moments
+----------
+
+Image moments help you to calculate some features like center of mass of the object, area of the
+object etc. Check out the wikipedia page on [Image
+Moments](http://en.wikipedia.org/wiki/Image_moment)
+
+We use the function: **cv.moments (array, binaryImage = false)**
+@param array raster image (single-channel, 8-bit or floating-point 2D array) or an array ( 1×N or N×1 ) of 2D points.
+@param binaryImage if it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_moments.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+From these moments, you can extract useful data like area, centroid etc. The centroid is given by the
+relations, \f$C_x = \frac{M_{10}}{M_{00}}\f$ and \f$C_y = \frac{M_{01}}{M_{00}}\f$. This can be done as
+follows:
+@code{.js}
+let M = cv.moments(cnt, false);
+let cx = M.m10 / M.m00;
+let cy = M.m01 / M.m00;
+@endcode
+
+2. Contour Area
+---------------
+
+Contour area is given by the function **cv.contourArea()** or from the moments, **M.m00**.
+
+We use the function: **cv.contourArea (contour, oriented = false)**
+@param contour input vector of 2D points (contour vertices)
+@param oriented oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine orientation of a contour by taking the sign of an area. By default, the parameter is false, which means that the absolute value is returned.
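+
+For example, assuming cnt is a contour obtained from cv.findContours:
+@code{.js}
+let area = cv.contourArea(cnt, false);
+@endcode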
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_area.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+3. Contour Perimeter
+--------------------
+
+It is also called arc length. It can be found using the **cv.arcLength()** function.
+
+We use the function: **cv.arcLength (curve, closed)**
+@param curve input vector of 2D points.
+@param closed flag indicating whether the curve is closed or not.
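+
+For example, for a closed contour:
+@code{.js}
+let perimeter = cv.arcLength(cnt, true);
+@endcode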
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_perimeter.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+4. Contour Approximation
+------------------------
+
+It approximates a contour shape to another shape with fewer vertices, depending upon the
+precision we specify. It is an implementation of the [Douglas-Peucker
+algorithm](http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm). Check the wikipedia page
+for the algorithm and a demonstration.
+
+We use the function: **cv.approxPolyDP (curve, approxCurve, epsilon, closed)**
+@param curve input vector of 2D points stored in cv.Mat.
+@param approxCurve result of the approximation. The type should match the type of the input curve.
+@param epsilon parameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation.
+@param closed If true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed.
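+
+A minimal sketch, using a fraction of the arc length as the accuracy parameter:
+@code{.js}
+let approx = new cv.Mat();
+let epsilon = 0.1 * cv.arcLength(cnt, true); // 10% of the contour perimeter
+cv.approxPolyDP(cnt, approx, epsilon, true);
+@endcode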
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_approxPolyDP.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+5. Convex Hull
+--------------
+
+The convex hull will look similar to contour approximation, but it is not (both may provide the same results
+in some cases). Here, the **cv.convexHull()** function checks a curve for convexity defects and
+corrects it. Generally speaking, convex curves are curves which always bulge out, or are
+at least flat. If a curve bulges inside, it is called a convexity defect. For example, check the
+image of a hand below. The red line shows the convex hull of the hand. The double-sided arrow marks show the
+convexity defects, which are the local maximum deviations of the hull from the contour.
+
+![image](images/convexitydefects.jpg)
+
+We use the function: **cv.convexHull (points, hull, clockwise = false, returnPoints = true)**
+@param points input 2D point set.
+@param hull output convex hull.
+@param clockwise orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing to the right, and its Y axis pointing upwards.
+@param returnPoints operation flag. In case of a matrix, when the flag is true, the function returns convex hull points. Otherwise, it returns indices of the convex hull points.
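+
+For example:
+@code{.js}
+let hull = new cv.Mat();
+cv.convexHull(cnt, hull, false, true);
+@endcode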
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_convexHull.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+6. Checking Convexity
+---------------------
+
+There is a function to check if a curve is convex or not, **cv.isContourConvex()**. It simply returns
+true or false. Not a big deal.
+
+@code{.js}
+cv.isContourConvex(cnt);
+@endcode
+
+7. Bounding Rectangle
+---------------------
+
+There are two types of bounding rectangles.
+
+### 7.a. Straight Bounding Rectangle
+
+It is a straight rectangle; it doesn't consider the rotation of the object. So the area of the bounding
+rectangle won't be minimal.
+
+We use the function: **cv.boundingRect (points)**
+@param points input 2D point set.
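+
+A minimal sketch that draws the bounding rectangle (assuming dst is the image to draw on):
+@code{.js}
+let rect = cv.boundingRect(cnt);
+let point1 = new cv.Point(rect.x, rect.y);
+let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
+cv.rectangle(dst, point1, point2, [255, 0, 0, 255], 2, cv.LINE_8, 0);
+@endcode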
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_boundingRect.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 7.b. Rotated Rectangle
+
+Here, the bounding rectangle is drawn with minimum area, so it considers the rotation as well.
+
+We use the function: **cv.minAreaRect (points)**
+@param points input 2D point set.
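+
+A minimal sketch that retrieves the four vertices of the rotated rectangle:
+@code{.js}
+let rotatedRect = cv.minAreaRect(cnt);
+let vertices = cv.RotatedRect.points(rotatedRect); // four cv.Point objects
+@endcode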
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_minAreaRect.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+8. Minimum Enclosing Circle
+---------------------------
+
+Next we find the circumcircle of an object using the function **cv.minEnclosingCircle()**. It is a
+circle which completely covers the object with minimum area.
+
+We use the functions: **cv.minEnclosingCircle (points)**
+@param points input 2D point set.
+
+**cv.circle (img, center, radius, color, thickness = 1, lineType = cv.LINE_8, shift = 0)**
+@param img image where the circle is drawn.
+@param center center of the circle.
+@param radius radius of the circle.
+@param color circle color.
+@param thickness thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
+@param lineType type of the circle boundary.
+@param shift number of fractional bits in the coordinates of the center and in the radius value.
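+
+A minimal sketch (assuming dst is the image to draw on):
+@code{.js}
+let circle = cv.minEnclosingCircle(cnt);
+cv.circle(dst, circle.center, circle.radius, [255, 0, 0, 255], 2, cv.LINE_8, 0);
+@endcode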
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_minEnclosingCircle.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+9. Fitting an Ellipse
+---------------------
+
+Next, we fit an ellipse to an object. It returns the rotated rectangle in which the ellipse is
+inscribed.
+
+We use the functions: **cv.fitEllipse (points)**
+@param points input 2D point set.
+
+**cv.ellipse1 (img, box, color, thickness = 1, lineType = cv.LINE_8)**
+@param img image.
+@param box alternative ellipse representation via RotatedRect. This means that the function draws an ellipse inscribed in the rotated rectangle.
+@param color ellipse color.
+@param thickness thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
+@param lineType type of the ellipse boundary.
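+
+A minimal sketch (assuming dst is the image to draw on):
+@code{.js}
+let rotatedRect = cv.fitEllipse(cnt);
+cv.ellipse1(dst, rotatedRect, [255, 0, 0, 255], 2, cv.LINE_8);
+@endcode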
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_fitEllipse.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+10. Fitting a Line
+------------------
+
+Similarly, we can fit a line to a set of points, approximating them with a straight line.
+
+We use the functions: **cv.fitLine (points, line, distType, param, reps, aeps)**
+@param points input 2D point set.
+@param line output line parameters. It should be a Mat of 4 elements[vx, vy, x0, y0], where [vx, vy] is a normalized vector collinear to the line and [x0, y0] is a point on the line.
+@param distType distance used by the M-estimator(see cv.DistanceTypes).
+@param param numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is chosen.
+@param reps sufficient accuracy for the radius (distance between the coordinate origin and the line).
+@param aeps sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
+
+**cv.line (img, pt1, pt2, color, thickness = 1, lineType = cv.LINE_8, shift = 0)**
+@param img image.
+@param pt1 first point of the line segment.
+@param pt2 second point of the line segment.
+@param color line color.
+@param thickness line thickness.
+@param lineType type of the line (see cv.LineTypes).
+@param shift number of fractional bits in the point coordinates.
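+
+A minimal sketch that fits a line and draws it across the image (assuming dst is the image to draw on):
+@code{.js}
+let line = new cv.Mat();
+cv.fitLine(cnt, line, cv.DIST_L2, 0, 0.01, 0.01);
+let vx = line.data32F[0], vy = line.data32F[1]; // normalized direction vector
+let x0 = line.data32F[2], y0 = line.data32F[3]; // a point on the line
+let lefty = Math.round(y0 - x0 * vy / vx);
+let righty = Math.round(y0 + (dst.cols - x0) * vy / vx);
+cv.line(dst, new cv.Point(dst.cols - 1, righty), new cv.Point(0, lefty), [255, 0, 0, 255], 2, cv.LINE_8, 0);
+@endcode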
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contour_features_fitLine.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Contour Properties {#tutorial_js_contour_properties}
+==================
+
+Goal
+----
+
+- Here we will learn to extract some frequently used properties of objects like Solidity, Equivalent
+Diameter, Mask image, Mean Intensity etc.
+
+1. Aspect Ratio
+---------------
+
+It is the ratio of width to height of bounding rect of the object.
+
+\f[Aspect \; Ratio = \frac{Width}{Height}\f]
+@code{.js}
+let rect = cv.boundingRect(cnt);
+let aspectRatio = rect.width / rect.height;
+@endcode
+
+2. Extent
+---------
+
+Extent is the ratio of contour area to bounding rectangle area.
+
+\f[Extent = \frac{Object \; Area}{Bounding \; Rectangle \; Area}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let rect = cv.boundingRect(cnt);
+let rectArea = rect.width * rect.height;
+let extent = area / rectArea;
+@endcode
+
+3. Solidity
+-----------
+
+Solidity is the ratio of contour area to its convex hull area.
+
+\f[Solidity = \frac{Contour \; Area}{Convex \; Hull \; Area}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let hull = new cv.Mat();
+cv.convexHull(cnt, hull, false, true);
+let hullArea = cv.contourArea(hull, false);
+let solidity = area / hullArea;
+@endcode
+
+4. Equivalent Diameter
+----------------------
+
+Equivalent Diameter is the diameter of the circle whose area is same as the contour area.
+
+\f[Equivalent \; Diameter = \sqrt{\frac{4 \times Contour \; Area}{\pi}}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let equiDiameter = Math.sqrt(4 * area / Math.PI);
+@endcode
+
+5. Orientation
+--------------
+
+Orientation is the angle at which the object is directed. The following method also gives the Major Axis and
+Minor Axis lengths.
+@code{.js}
+let rotatedRect = cv.fitEllipse(cnt);
+let angle = rotatedRect.angle;
+@endcode
+
+6. Mask and Pixel Points
+------------------------
+
+In some cases, we may need all the points which comprise that object.
+
+We use the function: **cv.transpose (src, dst)**
+@param src input array.
+@param dst output array of the same type as src.
+
+\htmlonly
+<iframe src="../../js_contour_properties_transpose.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+7. Maximum Value, Minimum Value and their locations
+---------------------------------------------------
+
+We use the function: **cv.minMaxLoc(src, mask)**
+@param src input single-channel array.
+@param mask optional mask used to select a sub-array.
+
+@code{.js}
+let result = cv.minMaxLoc(src, mask);
+let minVal = result.minVal;
+let maxVal = result.maxVal;
+let minLoc = result.minLoc;
+let maxLoc = result.maxLoc;
+@endcode
+
+8. Mean Color or Mean Intensity
+-------------------------------
+
+Here, we can find the average color of an object, or the average intensity of the object in
+grayscale mode. We again use the same mask to do it.
+
+We use the function: **cv.mean (src, mask)**
+@param src input array that should have from 1 to 4 channels so that the result can be stored in Scalar.
+@param mask optional operation mask.
+
+@code{.js}
+let average = cv.mean(src, mask);
+@endcode
--- /dev/null
+Contours : Getting Started {#tutorial_js_contours_begin}
+==========================
+
+Goal
+----
+
+- Understand what contours are.
+- Learn to find contours, draw contours etc
+- You will learn these functions : **cv.findContours()**, **cv.drawContours()**
+
+What are contours?
+------------------
+
+Contours can be explained simply as a curve joining all the continuous points (along the boundary)
+having the same color or intensity. Contours are a useful tool for shape analysis and object
+detection and recognition.
+
+- For better accuracy, use binary images. So before finding contours, apply threshold or canny
+ edge detection.
+- Since OpenCV 3.2, the source image is not modified by this function.
+- In OpenCV, finding contours is like finding a white object on a black background. So remember,
+ the object to be found should be white and the background should be black.
+
+How to draw the contours?
+-------------------------
+
+To draw the contours, the cv.drawContours function is used. It can also be used to draw any shape,
+provided you have its boundary points.
+
+We use the functions: **cv.findContours (image, contours, hierarchy, mode, method, offset = new cv.Point(0, 0))**
+@param image source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as binary.
+@param contours detected contours.
+@param hierarchy optional output vector containing information about the image topology. It has as many elements as the number of contours.
+@param mode contour retrieval mode(see cv.RetrievalModes).
+@param method contour approximation method(see cv.ContourApproximationModes).
+@param offset optional offset by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.
+
+**cv.drawContours (image, contours, contourIdx, color, thickness = 1, lineType = cv.LINE_8, hierarchy = new cv.Mat(), maxLevel = INT_MAX, offset = new cv.Point(0, 0))**
+@param image destination image.
+@param contours all the input contours.
+@param contourIdx parameter indicating a contour to draw. If it is negative, all the contours are drawn.
+@param color color of the contours.
+@param thickness thickness of lines the contours are drawn with. If it is negative, the contour interiors are drawn.
+@param lineType line connectivity(see cv.LineTypes).
+@param hierarchy optional information about hierarchy. It is only needed if you want to draw only some of the contours (see maxLevel).
+@param maxLevel maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is hierarchy available.
+@param offset optional contour shift parameter.
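+
+A minimal sketch putting the two together (assuming src is an RGBA cv.Mat read from a canvas):
+@code{.js}
+let gray = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 120, 200, cv.THRESH_BINARY); // contours work best on binary images
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(gray, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+for (let i = 0; i < contours.size(); ++i) {
+    cv.drawContours(dst, contours, i, new cv.Scalar(255, 0, 0), 1, cv.LINE_8, hierarchy, 100);
+}
+@endcode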
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contours_begin_contours.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Contour Approximation Method
+============================
+
+This is the fifth argument of the cv.findContours function. What does it actually denote?
+
+Above, we said that contours are the boundaries of a shape with the same intensity. It stores the (x,y)
+coordinates of the boundary of a shape. But does it store all the coordinates? That is specified by
+this contour approximation method.
+
+If you pass cv.CHAIN_APPROX_NONE, all the boundary points are stored. But do we actually need all
+the points? For example, if you found the contour of a straight line, do you need all the points on the line
+to represent it? No, we need just the two end points of that line. This is what
+cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby
+saving memory.
\ No newline at end of file
--- /dev/null
+Contours Hierarchy {#tutorial_js_contours_hierarchy}
+==================
+
+Goal
+----
+
+- This time, we learn about the hierarchy of contours, i.e. the parent-child relationship in Contours.
+
+Theory
+------
+
+In the last few articles on contours, we have worked with several functions related to contours
+provided by OpenCV. But when we found the contours in an image using the **cv.findContours()** function,
+we passed an argument, the **Contour Retrieval Mode**. We usually passed **cv.RETR_LIST** or
+**cv.RETR_TREE** and it worked well. But what does it actually mean?
+
+Also, in the output, besides the contours we got one more output which we named **hierarchy**
+(please check out the code in the previous articles). But we never used this hierarchy anywhere.
+So what is this hierarchy and what is it for? What is its relationship with the previously
+mentioned function argument?
+
+That is what we are going to deal in this article.
+
+### What is Hierarchy?
+
+Normally we use the **cv.findContours()** function to detect objects in an image, right? Sometimes
+objects are in different locations. But in some cases, some shapes are inside other shapes, just
+like nested figures. In this case, we call the outer one the **parent** and the inner one the **child**. This
+way, contours in an image have some relationship to each other. And we can specify how one contour is
+connected to another, like, is it a child of some other contour, or is it a parent etc.
+Representation of this relationship is called the **Hierarchy**.
+
+Consider an example image below :
+
+![image](images/hierarchy.png)
+
+In this image, there are a few shapes which I have numbered from **0-5**. *2 and 2a* denote the
+external and internal contours of the outermost box.
+
+Here, contours 0,1,2 are **external or outermost**. We can say, they are in **hierarchy-0** or
+simply they are in **same hierarchy level**.
+
+Next comes **contour-2a**. It can be considered a **child of contour-2** (or, the other way around,
+contour-2 is the parent of contour-2a). So let it be in **hierarchy-1**. Similarly contour-3 is a child of
+contour-2a and it comes in the next hierarchy. Finally contours 4,5 are the children of contour-3a, and
+they come in the last hierarchy level. From the way I numbered the boxes, I would say contour-4 is
+the first child of contour-3a (it could be contour-5 as well).
+
+I mentioned these things to explain terms like **same hierarchy level**, **external contour**,
+**child contour**, **parent contour**, **first child** etc. Now let's get into OpenCV.
+
+### Hierarchy Representation in OpenCV
+
+So each contour has its own information regarding which hierarchy level it is in, who its child is, who its
+parent is etc. OpenCV represents it as an array of four values: **[Next, Previous, First_Child,
+Parent]**
+
+<center>*"Next denotes next contour at the same hierarchical level."*</center>
+
+For example, take contour-0 in our picture. Which is the next contour at its level? It is contour-1. So
+simply put Next = 1. Similarly for contour-1, the next is contour-2. So Next = 2.
+
+What about contour-2? There is no next contour at the same level. So simply put Next = -1. What
+about contour-4? It is at the same level as contour-5. So its next contour is contour-5, so Next = 5.
+
+<center>*"Previous denotes previous contour at the same hierarchical level."*</center>
+
+It is the same as above. The previous contour of contour-1 is contour-0 at the same level. Similarly for
+contour-2, it is contour-1. And for contour-0, there is no previous, so put it as -1.
+
+<center>*"First_Child denotes its first child contour."*</center>
+
+There is no need for any explanation. For contour-2, the child is contour-2a. So it gets the
+corresponding index value of contour-2a. What about contour-3a? It has two children. But we take
+only the first child, which is contour-4. So First_Child = 4 for contour-3a.
+
+<center>*"Parent denotes index of its parent contour."*</center>
+
+It is just the opposite of **First_Child**. For both contour-4 and contour-5, the parent contour is
+contour-3a. For contour-3a, it is contour-3, and so on.
+
+@note If there is no child or parent, that field is taken as -1.
+
+Now that we know the hierarchy style used in OpenCV, we can check the Contour Retrieval Modes
+in OpenCV with the help of the same image given above, i.e. what do flags like cv.RETR_LIST,
+cv.RETR_TREE, cv.RETR_CCOMP, cv.RETR_EXTERNAL etc. mean?
+
+Contour Retrieval Mode
+----------------------
+
+### 1. RETR_LIST
+
+This is the simplest of the four flags (from the explanation point of view). It simply retrieves all the
+contours, but doesn't create any parent-child relationship. **Parents and kids are equal under this
+rule, and they are just contours**, i.e. they all belong to the same hierarchy level.
+
+So here, the 3rd and 4th terms in the hierarchy array are always -1. But obviously, the Next and Previous terms
+will have their corresponding values.
+
+### 2. RETR_EXTERNAL
+
+If you use this flag, it returns only the extreme outer contours. All child contours are left behind. **We
+can say, under this law, only the eldest in every family is taken care of. It doesn't care about
+the other members of the family**.
+
+
+### 3. RETR_CCOMP
+
+This flag retrieves all the contours and arranges them into a 2-level hierarchy, i.e. the external contours
+of the object (i.e. its boundary) are placed in hierarchy-1, and the contours of the holes inside the object
+(if any) are placed in hierarchy-2. If there is any object inside a hole, its contour is placed again in
+hierarchy-1 only, and its hole in hierarchy-2, and so on.
+
+Just consider the image of a "big white zero" on a black background. The outer circle of the zero belongs to
+the first hierarchy, and the inner circle of the zero belongs to the second hierarchy.
+
+We can explain it with a simple image. Here I have labelled the order of the contours in red and
+the hierarchy they belong to in green (either 1 or 2). The order is the same as the order in which OpenCV
+detects contours.
+
+![image](images/ccomp_hierarchy.png)
+
+So consider the first contour, i.e. contour-0. It is in hierarchy-1. It has two holes, contours 1&2, and they
+belong to hierarchy-2. So for contour-0, the next contour at the same hierarchy level is contour-3. And
+there is no previous one. Its first child is contour-1 in hierarchy-2. It has no parent,
+because it is in hierarchy-1. So its hierarchy array is [3,-1,1,-1].
+
+Now take contour-1. It is in hierarchy-2. The next one in the same hierarchy (under the parenthood of
+contour-0) is contour-2. No previous one. No child, but the parent is contour-0. So the array is
+[2,-1,-1,0].
+
+Similarly contour-2: it is in hierarchy-2. There is no next contour in the same hierarchy under
+contour-0. So no Next. Previous is contour-1. No child, the parent is contour-0. So the array is
+[-1,1,-1,0].
+
+Contour - 3 : Next in hierarchy-1 is contour-5. Previous is contour-0. Child is contour-4 and no
+parent. So array is [5,0,4,-1].
+
+Contour - 4 : It is in hierarchy 2 under contour-3 and it has no sibling. So no next, no previous,
+no child, parent is contour-3. So array is [-1,-1,-1,3].
+
+
+### 4. RETR_TREE
+
+And this is the final guy, Mr. Perfect. It retrieves all the contours and creates a full family
+hierarchy list. **It even tells who is the grandpa, father, son, grandson and even beyond... :)**.
+
+For example, I took the image above, rewrote the code for cv.RETR_TREE, reordered the contours as per the
+result given by OpenCV and analyzed it. Again, red letters give the contour number and green letters
+give the hierarchy order.
+
+![image](images/tree_hierarchy.png)
+
+Take contour-0 : It is in hierarchy-0. Next contour in same hierarchy is contour-7. No previous
+contours. Child is contour-1. And no parent. So array is [7,-1,1,-1].
+
+Take contour-1 : It is in hierarchy-1. No contour at the same level. No previous one. Its child is
+contour-2. Its parent is contour-0. So the array is [-1,-1,2,0].
--- /dev/null
+Contours : More Functions {#tutorial_js_contours_more_functions}
+=========================
+
+Goal
+----
+
+- Convexity defects and how to find them.
+- Finding shortest distance from a point to a polygon
+- Matching different shapes
+
+Theory and Code
+---------------
+
+### 1. Convexity Defects
+
+We saw what a convex hull is in the second chapter about contours. Any deviation of the object from this
+hull can be considered a convexity defect. We can visualize it using an image: we draw a
+line joining the start point and the end point, then draw a circle at the farthest point.
+
+@note Remember, we have to pass returnPoints = false while finding the convex hull, in order to find
+convexity defects.
+
+We use the function: **cv.convexityDefects (contour, convexhull, convexityDefect)**
+@param contour input contour.
+@param convexhull convex hull obtained using convexHull that should contain indices of the contour points that make the hull.
+@param convexityDefect the output vector of convexity defects. Each convexity defect is represented as a 4-element vector (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices in the original contour of the convexity defect's beginning, end and farthest point, and fixpt_depth is a fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour point and the hull. That is, the floating-point value of the depth is fixpt_depth/256.0.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contours_more_functions_convexityDefects.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 2. Point Polygon Test
+
+This function finds the shortest distance between a point in the image and a contour. It returns a
+distance which is negative when the point is outside the contour, positive when the point is inside, and zero
+if the point is on the contour.
+
+We use the function: **cv.pointPolygonTest (contour, pt, measureDist)**
+@param contour input contour.
+@param pt point tested against the contour.
+@param measureDist if true, the function estimates the signed distance from the point to the nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
+
+@code{.js}
+let dist = cv.pointPolygonTest(cnt, new cv.Point(50, 50), true);
+@endcode
+
+### 3. Match Shapes
+
+OpenCV comes with a function **cv.matchShapes()** which enables us to compare two shapes or two
+contours and returns a metric showing the similarity. The lower the result, the better the match.
+It is calculated based on the Hu-moment values. The different measurement methods are explained in the
+docs.
+
+We use the function: **cv.matchShapes (contour1, contour2, method, parameter)**
+@param contour1 first contour or grayscale image.
+@param contour2 second contour or grayscale image.
+@param method comparison method, see cv::ShapeMatchModes
+@param parameter method-specific parameter(not supported now).
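+
+A minimal sketch, assuming cnt1 and cnt2 are contours obtained with cv.findContours (method 1 selects the first Hu-moment based metric):
+@code{.js}
+let similarity = cv.matchShapes(cnt1, cnt2, 1, 0);
+@endcode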
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_contours_more_functions_shape.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Contours in OpenCV.js {#tutorial_js_table_of_contents_contours}
+==================
+
+- @subpage tutorial_js_contours_begin
+
+ Learn to find and draw Contours.
+
+- @subpage tutorial_js_contour_features
+
+ Learn to find different features of contours like area, perimeter, bounding rectangle etc.
+
+- @subpage tutorial_js_contour_properties
+
+ Learn to find different properties of contours like Solidity, Mean Intensity etc.
+
+- @subpage tutorial_js_contours_more_functions
+
+ Learn to find convexity defects, pointPolygonTest, match different shapes etc.
+
+- @subpage tutorial_js_contours_hierarchy
+
+ Learn about Contour Hierarchy.
--- /dev/null
+Smoothing Images {#tutorial_js_filtering}
+================
+
+Goals
+-----
+
+- Blur the images with various low pass filters
+- Apply custom-made filters to images (2D convolution)
+
+2D Convolution ( Image Filtering )
+----------------------------------
+
+As in one-dimensional signals, images can also be filtered with various low-pass filters (LPF),
+high-pass filters (HPF) etc. An LPF helps in removing noise, blurring images etc. An HPF helps
+in finding edges in images.
+
+OpenCV provides a function **cv.filter2D()** to convolve a kernel with an image. As an example, we
+will try an averaging filter on an image. A 5x5 averaging filter kernel will look like below:
+
+\f[K = \frac{1}{25} \begin{bmatrix} 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \end{bmatrix}\f]
+
+We use the functions: **cv.filter2D (src, dst, ddepth, kernel, anchor = new cv.Point(-1, -1), delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth desired depth of the destination image.
+@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point matrix; if you want to apply different kernels to different channels, split the image into separate color planes using split and process them individually.
+@param anchor anchor of the kernel that indicates the relative position of a filtered point within the kernel; the anchor should lie within the kernel; default value new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param delta optional value added to the filtered pixels before storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
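+
+A minimal sketch, assuming src is a cv.Mat read from a canvas:
+@code{.js}
+let kernel = new cv.Mat(3, 3, cv.CV_32FC1, new cv.Scalar(1 / 9)); // normalized 3x3 averaging kernel
+let dst = new cv.Mat();
+cv.filter2D(src, dst, cv.CV_8U, kernel, new cv.Point(-1, -1), 0, cv.BORDER_DEFAULT);
+@endcode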
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_filtering_filter.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Image Blurring (Image Smoothing)
+--------------------------------
+
+Image blurring is achieved by convolving the image with a low-pass filter kernel. It is useful for
+removing noise. It actually removes high-frequency content (e.g. noise, edges) from the image, so
+edges are blurred a little in this operation. (Well, there are blurring techniques which don't
+blur the edges.) OpenCV provides mainly four types of blurring techniques.
+
+### 1. Averaging
+
+This is done by convolving the image with a normalized box filter. It simply takes the average of all
+the pixels under the kernel area and replaces the central element. This is done by the function
+**cv.blur()** or **cv.boxFilter()**. Check the docs for more details about the kernel. We should
+specify the width and height of the kernel. A 3x3 normalized box filter would look like below:
+
+\f[K = \frac{1}{9} \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{bmatrix}\f]
+
+We use the functions: **cv.blur (src, dst, ksize, anchor = new cv.Point(-1, -1), borderType = cv.BORDER_DEFAULT)**
+@param src input image; it can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
+@param dst output image of the same size and type as src.
+@param ksize blurring kernel size.
+@param anchor anchor point; anchor = new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+**cv.boxFilter (src, dst, ddepth, ksize, anchor = new cv.Point(-1, -1), normalize = true, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and type as src.
+@param ddepth the output image depth (-1 to use src.depth()).
+@param ksize blurring kernel size.
+@param anchor anchor point; anchor = new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param normalize flag, specifying whether the kernel is normalized by its area or not.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+@note If you don't want to use a normalized box filter, use **cv.boxFilter()** and pass the argument
+normalize = false to the function.
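+
+For example, a minimal sketch:
+@code{.js}
+let dst = new cv.Mat();
+cv.blur(src, dst, new cv.Size(5, 5), new cv.Point(-1, -1), cv.BORDER_DEFAULT);
+@endcode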
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_filtering_blur.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 2. Gaussian Blurring
+
+In this, instead of a box filter, a Gaussian kernel is used.
+
+We use the function: **cv.GaussianBlur (src, dst, ksize, sigmaX, sigmaY = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image; the image can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
+@param dst output image of the same size and type as src.
+@param ksize blurring kernel size.
+@param sigmaX Gaussian kernel standard deviation in X direction.
+@param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, to fully control the result regardless of possible future modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and sigmaY.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
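+
+For example:
+@code{.js}
+let dst = new cv.Mat();
+cv.GaussianBlur(src, dst, new cv.Size(5, 5), 0, 0, cv.BORDER_DEFAULT);
+@endcode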
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_filtering_GaussianBlur.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 3. Median Blurring
+
+Here, the function **cv.medianBlur()** takes the median of all the pixels under the kernel area, and the central
+element is replaced with this median value. This is highly effective against salt-and-pepper noise
+in images. Interestingly, in the above filters, the central element is a newly
+calculated value which may be a pixel value in the image or a new value. But in median blurring,
+the central element is always replaced by some pixel value in the image. It reduces noise
+effectively. Its kernel size should be a positive odd integer.
+
+We use the function: **cv.medianBlur (src, dst, ksize)**
+@param src input 1, 3, or 4 channel image; when ksize is 3 or 5, the image depth should be cv.CV_8U, cv.CV_16U, or cv.CV_32F, for larger aperture sizes, it can only be cv.CV_8U.
+@param dst destination array of the same size and type as src.
+@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
+
+@note The median filter uses cv.BORDER_REPLICATE internally to cope with border pixels.
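+
+For example:
+@code{.js}
+let dst = new cv.Mat();
+cv.medianBlur(src, dst, 5);
+@endcode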
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_filtering_medianBlur.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 4. Bilateral Filtering
+
+**cv.bilateralFilter()** is highly effective in noise removal while keeping edges sharp. But the
+operation is slower compared to other filters. We already saw that a Gaussian filter takes a
+neighbourhood around the pixel and finds its Gaussian weighted average. This Gaussian filter is a
+function of space alone, that is, nearby pixels are considered while filtering. It doesn't consider
+whether pixels have almost the same intensity, or whether a pixel is an edge pixel or
+not. So it blurs the edges too, which we don't want.
+
+A bilateral filter also uses a Gaussian filter in space, but adds one more Gaussian filter which is a
+function of pixel difference. The Gaussian function of space makes sure only nearby pixels are considered
+for blurring, while the Gaussian function of intensity difference makes sure only those pixels with
+intensities similar to the central pixel are considered for blurring. So it preserves the edges, since
+pixels at edges will have large intensity variation.
+
+We use the function: **cv.bilateralFilter (src, dst, d, sigmaColor, sigmaSpace, borderType = cv.BORDER_DEFAULT)**
+@param src source 8-bit or floating-point, 1-channel or 3-channel image.
+@param dst output image of the same size and type as src.
+@param d diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
+@param sigmaColor filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood will be mixed together, resulting in larger areas of semi-equal color.
+@param sigmaSpace filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough. When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+@note For simplicity, you can set the 2 sigma values to be the same. If they are small (< 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very strong effect, making the image look "cartoonish". Large filters (d > 5) are very slow, so it is recommended to use d=5 for real-time applications, and perhaps d=9 for offline applications that need heavy noise filtering.
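+
+A minimal sketch (bilateral filtering works on 1- or 3-channel images, so we drop the alpha channel first):
+@code{.js}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
+let dst = new cv.Mat();
+cv.bilateralFilter(src, dst, 9, 75, 75, cv.BORDER_DEFAULT);
+@endcode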
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_filtering_bilateralFilter.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Geometric Transformations of Images {#tutorial_js_geometric_transformations}
+===================================
+
+Goals
+-----
+
+- Learn how to apply different geometric transformation to images like translation, rotation, affine
+ transformation etc.
+- You will learn these functions: **cv.resize**, **cv.warpAffine**, **cv.getAffineTransform** and **cv.warpPerspective**
+
+Transformations
+---------------
+
+
+### Scaling
+
+Scaling is just resizing of the image. OpenCV comes with a function **cv.resize()** for this
+purpose. The size of the image can be specified manually, or you can specify the scaling factor.
+Different interpolation methods are used. Preferable interpolation methods are **cv.INTER_AREA**
+for shrinking and **cv.INTER_CUBIC** (slow) & **cv.INTER_LINEAR** for zooming.
+
+We use the function: **cv.resize (src, dst, dsize, fx = 0, fy = 0, interpolation = cv.INTER_LINEAR)**
+@param src input image
+@param dst output image; it has the size dsize (when it is non-zero) or the size computed from src.size(), fx, and fy; the type of dst is the same as of src.
+@param dsize output image size; if it equals zero, it is computed as:
+ \f[\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\f]
+ Either dsize or both fx and fy must be non-zero.
+@param fx scale factor along the horizontal axis; when it equals 0, it is computed as \f[\texttt{(double)dsize.width/src.cols}\f]
+@param fy scale factor along the vertical axis; when it equals 0, it is computed as \f[\texttt{(double)dsize.height/src.rows}\f]
+@param interpolation interpolation method (see **cv.InterpolationFlags**)
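+
+For example:
+@code{.js}
+let dst = new cv.Mat();
+let dsize = new cv.Size(300, 300);
+cv.resize(src, dst, dsize, 0, 0, cv.INTER_AREA);
+@endcode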
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_geometric_transformations_resize.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### Translation
+
+Translation is the shifting of an object's location. If you know the shift in the (x,y) direction, let it
+be \f$(t_x,t_y)\f$, you can create the transformation matrix \f$\textbf{M}\f$ as follows:
+
+\f[M = \begin{bmatrix} 1 & 0 & t_x \\ 0 & 1 & t_y \end{bmatrix}\f]
+
+We use the function: **cv.warpAffine (src, dst, M, dsize, flags = cv.INTER_LINEAR, borderMode = cv.BORDER_CONSTANT, borderValue = new cv.Scalar())**
+@param src input image.
+@param dst output image that has the size dsize and the same type as src.
+@param M 2 × 3 transformation matrix (cv.CV_64FC1 type).
+@param dsize size of the output image.
+@param flags combination of interpolation methods (see cv.InterpolationFlags) and the optional flag WARP_INVERSE_MAP that means that M is the inverse transformation (dst → src).
+@param borderMode pixel extrapolation method (see cv.BorderTypes); when borderMode = BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to the "outliers" in the source image are not modified by the function.
+@param borderValue value used in case of a constant border; by default, it is 0.
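+
+A minimal sketch that shifts the image 50 pixels right and 100 pixels down:
+@code{.js}
+let M = cv.matFromArray(2, 3, cv.CV_64FC1, [1, 0, 50, 0, 1, 100]);
+let dst = new cv.Mat();
+let dsize = new cv.Size(src.cols, src.rows);
+cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+@endcode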
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_geometric_transformations_warpAffine.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### Rotation
+
+Rotation of an image for an angle \f$\theta\f$ is achieved by the transformation matrix of the form
+
+\f[M = \begin{bmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{bmatrix}\f]
+
+But OpenCV provides scaled rotation with an adjustable center of rotation, so that you can rotate about any
+location you prefer. The modified transformation matrix is given by
+
+\f[\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot center.x - \beta \cdot center.y \\ - \beta & \alpha & \beta \cdot center.x + (1- \alpha ) \cdot center.y \end{bmatrix}\f]
+
+where:
+
+\f[\begin{array}{l} \alpha = scale \cdot \cos \theta , \\ \beta = scale \cdot \sin \theta \end{array}\f]
+
+We use the function: **cv.getRotationMatrix2D (center, angle, scale)**
+@param center center of the rotation in the source image.
+@param angle rotation angle in degrees. Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).
+@param scale isotropic scale factor.
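+
+A minimal sketch that rotates the image by 90 degrees about its center:
+@code{.js}
+let dst = new cv.Mat();
+let center = new cv.Point(src.cols / 2, src.rows / 2);
+let M = cv.getRotationMatrix2D(center, 90, 1);
+cv.warpAffine(src, dst, M, new cv.Size(src.cols, src.rows), cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+@endcode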
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_geometric_transformations_rotateWarpAffine.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### Affine Transformation
+
+In an affine transformation, all parallel lines in the original image will still be parallel in the
+output image. To find the transformation matrix, we need three points from the input image and their
+corresponding locations in the output image. Then **cv.getAffineTransform** will create a 2x3 matrix,
+which is to be passed to **cv.warpAffine**.
+
+We use the function: **cv.getAffineTransform (src, dst)**
+
+@param src three points ([3, 1] size and cv.CV_32FC2 type) from the input image.
+@param dst three corresponding points([3, 1] size and cv.CV_32FC2 type) in output image.
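+
+A minimal sketch (the point coordinates are arbitrary examples):
+@code{.js}
+let srcTri = cv.matFromArray(3, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387]);
+let dstTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300]);
+let M = cv.getAffineTransform(srcTri, dstTri);
+let dst = new cv.Mat();
+cv.warpAffine(src, dst, M, new cv.Size(src.cols, src.rows), cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+@endcode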
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_geometric_transformations_getAffineTransform.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### Perspective Transformation
+
+For perspective transformation, you need a 3x3 transformation matrix. Straight lines will remain straight even after the transformation. To find this transformation matrix, you need 4 points on the input image and corresponding points on the output image. Among these 4 points, 3 of them should not be collinear. Then transformation matrix can be found by the function **cv.getPerspectiveTransform**. Then apply **cv.warpPerspective** with this 3x3 transformation matrix.
+
+We use the functions: **cv.warpPerspective (src, dst, M, dsize, flags = cv.INTER_LINEAR, borderMode = cv.BORDER_CONSTANT, borderValue = new cv.Scalar())**
+
+@param src input image.
+@param dst output image that has the size dsize and the same type as src.
+@param M 3 × 3 transformation matrix (cv.CV_64FC1 type).
+@param dsize size of the output image.
+@param flags combination of interpolation methods (cv.INTER_LINEAR or cv.INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, that sets M as the inverse transformation (dst → src).
+@param borderMode pixel extrapolation method (cv.BORDER_CONSTANT or cv.BORDER_REPLICATE).
+@param borderValue value used in case of a constant border; by default, it is 0.
+
+**cv.getPerspectiveTransform (src, dst)**
+
+@param src coordinates of quadrangle vertices in the source image.
+@param dst coordinates of the corresponding quadrangle vertices in the destination image.
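+
+A minimal sketch (the quadrangle coordinates are arbitrary examples):
+@code{.js}
+let srcQuad = cv.matFromArray(4, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387, 389, 390]);
+let dstQuad = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300, 300, 300]);
+let M = cv.getPerspectiveTransform(srcQuad, dstQuad);
+let dst = new cv.Mat();
+cv.warpPerspective(src, dst, M, new cv.Size(300, 300), cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+@endcode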
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_geometric_transformations_warpPerspective.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Foreground Extraction using GrabCut Algorithm {#tutorial_js_grabcut}
+=========================================================
+
+Goal
+----
+
+- We will learn GrabCut algorithm to extract foreground in images
+
+Theory
+------
+
+The GrabCut algorithm was designed by Carsten Rother, Vladimir Kolmogorov & Andrew Blake from Microsoft
+Research Cambridge, UK, in their paper ["GrabCut": interactive foreground extraction using iterated
+graph cuts](http://dl.acm.org/citation.cfm?id=1015720). An algorithm was needed for foreground
+extraction with minimal user interaction, and the result was GrabCut.
+
+How does it work from the user's point of view? Initially the user draws a rectangle around the foreground region
+(the foreground region should be completely inside the rectangle). Then the algorithm segments it
+iteratively to get the best result. Done. But in some cases, the segmentation won't be fine; for example,
+it may have marked some foreground region as background and vice versa. In that case, the user needs to
+do fine touch-ups. Just give some strokes on the image where the faulty results are. A stroke
+basically says *"Hey, this region should be foreground, you marked it background, correct it in the next
+iteration"* or its opposite for background. Then in the next iteration, you get better results.
+
+What happens in the background?
+
+- User inputs the rectangle. Everything outside this rectangle will be taken as sure background
+ (That is the reason it is mentioned before that your rectangle should include all the
+ objects). Everything inside rectangle is unknown. Similarly any user input specifying
+ foreground and background are considered as hard-labelling which means they won't change in
+ the process.
+- The computer does an initial labelling depending on the data we gave. It labels the foreground and
+ background pixels (or it hard-labels).
+- Now a Gaussian Mixture Model(GMM) is used to model the foreground and background.
+- Depending on the data we gave, the GMM learns and creates a new pixel distribution. That is, the
+ unknown pixels are labelled either probable foreground or probable background depending on their
+ relation to the other hard-labelled pixels in terms of color statistics (it is just like
+ clustering).
+- A graph is built from this pixel distribution. The nodes in the graph are pixels. Two additional
+ nodes are added, the **Source node** and the **Sink node**. Every foreground pixel is connected to the
+ Source node and every background pixel is connected to the Sink node.
+- The weights of edges connecting pixels to source node/end node are defined by the probability
+ of a pixel being foreground/background. The weights between the pixels are defined by the edge
+ information or pixel similarity. If there is a large difference in pixel color, the edge
+ between them will get a low weight.
+- Then a mincut algorithm is used to segment the graph. It cuts the graph into two parts, separating the
+ source node and the sink node, with a minimum cost function. The cost function is the sum of all
+ the weights of the edges that are cut. After the cut, all the pixels connected to the Source node
+ become foreground and those connected to the Sink node become background.
+- The process is continued until the classification converges.
+
+It is illustrated in the image below (Image Courtesy: <http://www.cs.ru.ac.za/research/g02m1682/>)
+
+![image](images/grabcut_scheme.jpg)
+
+Demo
+----
+
+We use the function: **cv.grabCut (image, mask, rect, bgdModel, fgdModel, iterCount, mode = cv.GC_EVAL)**
+
+@param image input 8-bit 3-channel image.
+@param mask input/output 8-bit single-channel mask. The mask is initialized by the function when mode is set to GC_INIT_WITH_RECT. Its elements may have one of the cv.GrabCutClasses.
+@param rect ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when mode==GC_INIT_WITH_RECT.
+@param bgdModel temporary array for the background model. Do not modify it while you are processing the same image.
+@param fgdModel temporary arrays for the foreground model. Do not modify it while you are processing the same image.
+@param iterCount number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or mode==GC_EVAL .
+@param mode operation mode that could be one of the cv::GrabCutModes
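+
+A minimal sketch following the parameters above (the ROI rectangle is a hypothetical example):
+@code{.js}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0); // grabCut expects an 8-bit 3-channel image
+let mask = new cv.Mat();
+let bgdModel = new cv.Mat();
+let fgdModel = new cv.Mat();
+let rect = new cv.Rect(50, 50, 260, 280);    // rectangle enclosing the foreground object
+cv.grabCut(src, mask, rect, bgdModel, fgdModel, 1, cv.GC_INIT_WITH_RECT);
+@endcode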
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_grabcut_grabCut.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Gradients {#tutorial_js_gradients}
+===============
+
+Goal
+----
+
+- Find image gradients, edges etc.
+- We will learn the following functions : **cv.Sobel()**, **cv.Scharr()**, **cv.Laplacian()** etc.
+
+Theory
+------
+
+OpenCV provides three types of gradient filters, or high-pass filters: Sobel, Scharr and Laplacian.
+We will see each one of them.
+
+### 1. Sobel and Scharr Derivatives
+
+The Sobel operator is a joint Gaussian smoothing plus differentiation operation, so it is more
+resistant to noise. You can specify the direction of the derivatives to be taken, vertical or horizontal
+(by the arguments yorder and xorder respectively). You can also specify the size of the kernel by the
+argument ksize. If ksize = -1, a 3x3 Scharr filter is used, which gives better results than a 3x3 Sobel
+filter. Please see the docs for the kernels used.
+
+We use the functions: **cv.Sobel (src, dst, ddepth, dx, dy, ksize = 3, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth(see cv.combinations); in the case of 8-bit input images it will result in truncated derivatives.
+@param dx order of the derivative x.
+@param dy order of the derivative y.
+@param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
+@param scale optional scale factor for the computed derivative values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
+**cv.Scharr (src, dst, ddepth, dx, dy, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth(see cv.combinations).
+@param dx order of the derivative x.
+@param dy order of the derivative y.
+@param scale optional scale factor for the computed derivative values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
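+
+A minimal sketch of computing the first-order derivatives (canvas ids are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let gradX = new cv.Mat();
+let gradY = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+// dx = 1, dy = 0 gives the horizontal derivative; swap them for the vertical one.
+// Passing ksize = -1 would use the 3x3 Scharr kernel instead.
+cv.Sobel(gray, gradX, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.Sobel(gray, gradY, cv.CV_8U, 0, 1, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', gradX);
+src.delete(); gray.delete(); gradX.delete(); gradY.delete();
+@endcode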
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_gradients_Sobel.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 2. Laplacian Derivatives
+
+It calculates the Laplacian of the image given by the relation,
+\f$\Delta src = \frac{\partial ^2{src}}{\partial x^2} + \frac{\partial ^2{src}}{\partial y^2}\f$ where
+each derivative is found using Sobel derivatives. If ksize = 1, then the following kernel is used
+for filtering:
+
+\f[kernel = \begin{bmatrix} 0 & 1 & 0 \\ 1 & -4 & 1 \\ 0 & 1 & 0 \end{bmatrix}\f]
+
+We use the function: **cv.Laplacian (src, dst, ddepth, ksize = 1, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth.
+@param ksize aperture size used to compute the second-derivative filters.
+@param scale optional scale factor for the computed Laplacian values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
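+
+A minimal sketch (canvas ids are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let dst = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.Laplacian(gray, dst, cv.CV_8U, 1, 1, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); dst.delete();
+@endcode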
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_gradients_Laplacian.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+One Important Matter!
+---------------------
+
+In our last example, the output datatype is cv.CV_8U. But there is a slight problem with that. A
+black-to-white transition is taken as a positive slope (it has a positive value) while a
+white-to-black transition is taken as a negative slope (it has a negative value). So when you
+convert data to cv.CV_8U, all negative slopes are made zero. In simple words, you miss that edge.
+
+If you want to detect both edges, the better option is to keep the output datatype in some higher
+form, like cv.CV_16S, cv.CV_64F etc., take its absolute value and then convert back to cv.CV_8U.
+The code below demonstrates this procedure for a horizontal Sobel filter and the difference in
+results.
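+
+A minimal sketch of that procedure (canvas ids are illustrative, and cv.convertScaleAbs is assumed
+to be available in your opencv.js build):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let sobel8u = new cv.Mat();
+let sobel64f = new cv.Mat();
+let abs8u = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+// output kept as cv.CV_8U: negative (white-to-black) slopes are clipped to zero
+cv.Sobel(gray, sobel8u, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+// output kept as cv.CV_64F, then |value| is scaled back to 8 bits: both edges survive
+cv.Sobel(gray, sobel64f, cv.CV_64F, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.convertScaleAbs(sobel64f, abs8u, 1, 0);
+cv.imshow('canvasOutput', abs8u);
+src.delete(); gray.delete(); sobel8u.delete(); sobel64f.delete(); abs8u.delete();
+@endcode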
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_gradients_absSobel.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Histogram - 3 : Histogram Backprojection {#tutorial_js_histogram_backprojection}
+========================================
+
+Goal
+----
+
+- We will learn about histogram backprojection.
+
+Theory
+------
+
+It was proposed by **Michael J. Swain** and **Dana H. Ballard** in their paper **Indexing via color
+histograms**.
+
+**What is it actually, in simple words?** It is used for image segmentation or finding objects of
+interest in an image. In simple words, it creates an image of the same size (but single channel) as
+that of our input image, where each pixel corresponds to the probability of that pixel belonging to
+our object. In simpler words, the output image will have our object of interest in more white
+compared to the remaining part. Well, that is an intuitive explanation. (I can't make it any
+simpler). Histogram Backprojection is used with the camshift algorithm etc.
+
+**How do we do it?** We create a histogram of an image containing our object of interest (in our
+case, the ground, leaving out the player and other things). The object should fill the image as far
+as possible for better results. And a color histogram is preferred over a grayscale histogram,
+because the color of the object is a better way to define the object than its grayscale intensity.
+We then "back-project" this histogram over our test image where we need to find the object, i.e.,
+we calculate the probability of every pixel belonging to the ground and show it. The resulting
+output on proper thresholding gives us the ground alone.
+
+Backprojection in OpenCV
+------------------------
+
+We use the functions: **cv.calcBackProject (images, channels, hist, dst, ranges, scale)**
+
+@param images source arrays. They all should have the same depth, cv.CV_8U, cv.CV_16U or cv.CV_32F , and the same size. Each of them can have an arbitrary number of channels.
+@param channels the list of channels used to compute the back projection. The number of channels must match the histogram dimensionality.
+@param hist input histogram that can be dense or sparse.
+@param dst destination back projection array that is a single-channel array of the same size and depth as images[0].
+@param ranges array of arrays of the histogram bin boundaries in each dimension(see cv.calcHist).
+@param scale optional scale factor for the output back projection.
+
+**cv.normalize (src, dst, alpha = 1, beta = 0, norm_type = cv.NORM_L2, dtype = -1, mask = new cv.Mat())**
+
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range normalization.
+@param beta upper range boundary in case of the range normalization; it is not used for the norm normalization.
+@param norm_type normalization type (see cv.NormTypes).
+@param dtype when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth = CV_MAT_DEPTH(dtype).
+@param mask optional operation mask.
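+
+A condensed sketch of the whole pipeline on the hue channel of HSV (the three canvas ids are
+illustrative assumptions):
+
+@code{.js}
+// histogram of the region that contains the object
+let roi = cv.imread('roiCanvasInput');
+cv.cvtColor(roi, roi, cv.COLOR_RGBA2RGB, 0);
+cv.cvtColor(roi, roi, cv.COLOR_RGB2HSV, 0);
+let roiVec = new cv.MatVector();
+roiVec.push_back(roi);
+let roiHist = new cv.Mat();
+let mask = new cv.Mat();
+cv.calcHist(roiVec, [0], mask, roiHist, [180], [0, 180], false);
+cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
+// back-project that histogram onto the image we search in
+let target = cv.imread('targetCanvasInput');
+cv.cvtColor(target, target, cv.COLOR_RGBA2RGB, 0);
+cv.cvtColor(target, target, cv.COLOR_RGB2HSV, 0);
+let targetVec = new cv.MatVector();
+targetVec.push_back(target);
+let dst = new cv.Mat();
+cv.calcBackProject(targetVec, [0], roiHist, dst, [0, 180], 1);
+cv.imshow('canvasOutput', dst);
+roi.delete(); roiVec.delete(); roiHist.delete(); mask.delete();
+target.delete(); targetVec.delete(); dst.delete();
+@endcode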
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_histogram_backprojection_calcBackProject.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Histograms - 1 : Find, Plot, Analyze !!! {#tutorial_js_histogram_begins}
+========================================
+
+Goal
+----
+
+- Find histograms
+- Plot histograms
+- You will learn the function: **cv.calcHist()**.
+
+Theory
+------
+
+So what is a histogram? You can consider a histogram as a graph or plot which gives you an overall
+idea about the intensity distribution of an image. It is a plot with pixel values (ranging from 0 to
+255, not always) on the X-axis and the corresponding number of pixels in the image on the Y-axis.
+
+It is just another way of understanding the image. By looking at the histogram of an image, you get
+intuition about the contrast, brightness, intensity distribution etc. of that image. Almost all
+image processing tools today provide features on the histogram. Below is an image from the
+[Cambridge in Color website](http://www.cambridgeincolour.com/tutorials/histograms1.htm), and I
+recommend you visit the site for more details.
+
+![image](histogram_sample.jpg)
+
+You can see the image and its histogram. (Remember, this histogram is drawn for a grayscale image,
+not a color image). The left region of the histogram shows the amount of darker pixels in the image
+and the right region shows the amount of brighter pixels. From the histogram, you can see the dark
+region is larger than the brighter region, and the amount of midtones (pixel values in the
+mid-range, say around 127) is very small.
+
+Find Histogram
+--------------
+
+We use the function: **cv.calcHist (image, channels, mask, hist, histSize, ranges, accumulate = false)**
+
+@param image source arrays. They all should have the same depth, cv.CV_8U, cv.CV_16U or cv.CV_32F , and the same size. Each of them can have an arbitrary number of channels.
+@param channels list of the dims channels used to compute the histogram.
+@param mask optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.
+@param hist output histogram(cv.CV_32F type), which is a dense or sparse dims -dimensional array.
+@param histSize array of histogram sizes in each dimension.
+@param ranges array of the dims arrays of the histogram bin boundaries in each dimension.
+@param accumulate accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.
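+
+A minimal sketch of computing a 256-bin grayscale histogram (the canvas id is illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+let srcVec = new cv.MatVector();
+srcVec.push_back(src);
+let hist = new cv.Mat();
+let mask = new cv.Mat();
+// 256 bins over [0, 255] on channel 0 of the grayscale image
+cv.calcHist(srcVec, [0], mask, hist, [256], [0, 255], false);
+let result = cv.minMaxLoc(hist, mask);
+console.log('count in the most frequent bin:', result.maxVal);
+src.delete(); srcVec.delete(); hist.delete(); mask.delete();
+@endcode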
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_histogram_begins_calcHist.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Histograms - 2: Histogram Equalization {#tutorial_js_histogram_equalization}
+======================================
+
+Goal
+----
+
+- We will learn the concepts of histogram equalization and use it to improve the contrast of our
+ images.
+
+Theory
+------
+
+Consider an image whose pixel values are confined to some specific range of values only. For
+example, a brighter image will have all pixels confined to high values. But a good image will have
+pixels from all regions of the image. So you need to stretch this histogram to either end (as shown
+in the image below, from Wikipedia) and that is what Histogram Equalization does (in simple words).
+This normally improves the contrast of the image.
+
+![image](images/histogram_equalization.png)
+
+I would recommend you read the Wikipedia page on [Histogram
+Equalization](http://en.wikipedia.org/wiki/Histogram_equalization) for more details about it. It has
+a very good explanation with worked-out examples, so you will understand almost everything after
+reading it.
+
+Histograms Equalization in OpenCV
+---------------------------------
+
+We use the function: **cv.equalizeHist (src, dst)**
+
+@param src source 8-bit single channel image.
+@param dst destination image of the same size and type as src.
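+
+A minimal sketch (canvas ids are illustrative; note the conversion to a single-channel image first):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let dst = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);  // equalizeHist needs 8-bit single channel
+cv.equalizeHist(gray, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); dst.delete();
+@endcode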
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_histogram_equalization_equalizeHist.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+CLAHE (Contrast Limited Adaptive Histogram Equalization)
+--------------------------------------------------------
+
+In **adaptive histogram equalization**, the image is divided into small blocks called "tiles"
+(tileSize is 8x8 by default in OpenCV). Then each of these blocks is histogram equalized as usual.
+So in a small area, the histogram would be confined to a small region (unless there is noise). If
+noise is there, it will be amplified. To avoid this, **contrast limiting** is applied. If any
+histogram bin is above the specified contrast limit (by default 40 in OpenCV), those pixels are
+clipped and distributed uniformly to other bins before applying histogram equalization. After
+equalization, to remove artifacts at tile borders, bilinear interpolation is applied.
+
+We use the class: **cv.CLAHE (clipLimit = 40, tileGridSize = new cv.Size(8, 8))**
+
+@param clipLimit threshold for contrast limiting.
+@param tileGridSize size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
+
+@note Don't forget to delete CLAHE!
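+
+A minimal sketch, with the default clip limit and tile grid (canvas ids are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let dst = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+let clahe = new cv.CLAHE(40, new cv.Size(8, 8));
+clahe.apply(gray, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); dst.delete();
+clahe.delete();                                 // per the note above, delete the CLAHE object too
+@endcode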
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_histogram_equalization_createCLAHE.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Histograms in OpenCV.js {#tutorial_js_table_of_contents_histograms}
+====================
+
+- @subpage tutorial_js_histogram_begins
+
+    Learn to find and plot histograms
+
+- @subpage tutorial_js_histogram_equalization
+
+ Learn to Equalize Histograms to get better contrast for images
+
+- @subpage tutorial_js_histogram_backprojection
+
+ Learn histogram backprojection to segment colored objects
--- /dev/null
+Hough Circle Transform {#tutorial_js_houghcircles}
+======================
+
+Goal
+----
+
+- We will learn to use Hough Transform to find circles in an image.
+- We will learn these functions: **cv.HoughCircles()**
+
+Theory
+------
+
+A circle is represented mathematically as \f$(x-x_{center})^2 + (y - y_{center})^2 = r^2\f$ where
+\f$(x_{center},y_{center})\f$ is the center of the circle, and \f$r\f$ is the radius of the circle. From
+this equation, we can see we have 3 parameters, so we need a 3D accumulator for the hough
+transform, which would be highly inefficient. So OpenCV uses a trickier method, the **Hough
+Gradient Method**, which uses the gradient information of edges.
+
+We use the function: **cv.HoughCircles (image, circles, method, dp, minDist, param1 = 100, param2 = 100, minRadius = 0, maxRadius = 0)**
+
+@param image 8-bit, single-channel, grayscale input image.
+@param circles output vector of found circles(cv.CV_32FC3 type). Each vector is encoded as a 3-element floating-point vector (x,y,radius) .
+@param method detection method(see cv.HoughModes). Currently, the only implemented method is HOUGH_GRADIENT
+@param dp inverse ratio of the accumulator resolution to the image resolution. For example, if dp = 1 , the accumulator has the same resolution as the input image. If dp = 2 , the accumulator has half as big width and height.
+@param minDist minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
+@param param1 first method-specific parameter. In case of HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
+@param param2 second method-specific parameter. In case of HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first.
+@param minRadius minimum circle radius.
+@param maxRadius maximum circle radius.
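+
+A minimal sketch (canvas ids and the minDist/param1/param2 values are illustrative; tune them for
+your image):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let circles = new cv.Mat();
+let dst = src.clone();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.HoughCircles(gray, circles, cv.HOUGH_GRADIENT, 1, 45, 75, 40, 0, 0);
+// each detected circle is an (x, y, radius) triple in circles.data32F
+for (let i = 0; i < circles.cols; ++i) {
+    let x = circles.data32F[i * 3];
+    let y = circles.data32F[i * 3 + 1];
+    let radius = circles.data32F[i * 3 + 2];
+    cv.circle(dst, new cv.Point(x, y), radius, [255, 0, 0, 255], 2);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); circles.delete(); dst.delete();
+@endcode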
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_houghcircles_HoughCirclesP.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Hough Line Transform {#tutorial_js_houghlines}
+====================
+
+Goal
+----
+
+- We will understand the concept of the Hough Transform.
+- We will learn how to use it to detect lines in an image.
+- We will learn the following functions: **cv.HoughLines()**, **cv.HoughLinesP()**
+
+Theory
+------
+
+The Hough Transform is a popular technique to detect any shape, if you can represent that shape in a
+mathematical form. It can detect the shape even if it is broken or distorted a little bit. We will
+see how it works for a line.
+
+A line can be represented as \f$y = mx+c\f$ or in a parametric form, as
+\f$\rho = x \cos \theta + y \sin \theta\f$, where \f$\rho\f$ is the perpendicular distance from the origin
+to the line, and \f$\theta\f$ is the angle formed by this perpendicular line and the horizontal axis,
+measured counter-clockwise (that direction varies depending on how you represent the coordinate
+system; this representation is used in OpenCV). Check the image below:
+
+![image](images/houghlines1.svg)
+
+So if the line is passing below the origin, it will have a positive rho and an angle less than
+180. If it is going above the origin, instead of taking an angle greater than 180, the angle is
+taken less than 180, and rho is taken negative. Any vertical line will have 0 degrees and
+horizontal lines will have 90 degrees.
+
+Now let's see how the Hough Transform works for lines. Any line can be represented in these two terms,
+\f$(\rho, \theta)\f$. So first it creates a 2D array or accumulator (to hold the values of the two parameters)
+and it is set to 0 initially. Let rows denote the \f$\rho\f$ and columns denote the \f$\theta\f$. Size of
+array depends on the accuracy you need. Suppose you want the accuracy of angles to be 1 degree, you will
+need 180 columns. For \f$\rho\f$, the maximum distance possible is the diagonal length of the image. So
+taking one pixel accuracy, the number of rows can be the diagonal length of the image.
+
+Consider a 100x100 image with a horizontal line at the middle. Take the first point of the line. You
+know its (x,y) values. Now in the line equation, put the values \f$\theta = 0,1,2,....,180\f$ and check
+the \f$\rho\f$ you get. For every \f$(\rho, \theta)\f$ pair, you increment value by one in our accumulator
+in its corresponding \f$(\rho, \theta)\f$ cells. So now in accumulator, the cell (50,90) = 1 along with
+some other cells.
+
+Now take the second point on the line. Do the same as above. Increment the values in the cells
+corresponding to the \f$(\rho, \theta)\f$ you got. This time, the cell (50,90) = 2. What you actually
+do is vote for the \f$(\rho, \theta)\f$ values. You continue this process for every point on the line. At
+each point, the cell (50,90) will be incremented or voted up, while other cells may or may not be
+voted up. This way, at the end, the cell (50,90) will have maximum votes. So if you search the
+accumulator for maximum votes, you get the value (50,90) which says, there is a line in this image
+at a distance 50 from the origin and at angle 90 degrees. It is well shown in the below animation (Image
+Courtesy: [Amos Storkey](http://homepages.inf.ed.ac.uk/amos/hough.html) )
+
+![](houghlinesdemo.gif)
+
+This is how hough transform works for lines. It is simple. Below is an image which shows the accumulator. Bright spots at some locations
+denote they are the parameters of possible lines in the image. (Image courtesy: [Wikipedia](http://en.wikipedia.org/wiki/Hough_transform) )
+
+![](houghlines2.jpg)
+
+Hough Transform in OpenCV
+=========================
+
+Everything explained above is encapsulated in the OpenCV function **cv.HoughLines()**. It simply
+returns an array of \f$(\rho, \theta)\f$ values, where \f$\rho\f$ is measured in pixels and \f$\theta\f$ is
+measured in radians. The input image should be a binary image, so apply thresholding or use canny
+edge detection before applying the hough transform.
+
+We use the function: **cv.HoughLines (image, lines, rho, theta, threshold, srn = 0, stn = 0, min_theta = 0, max_theta = Math.PI)**
+@param image 8-bit, single-channel binary source image. The image may be modified by the function.
+@param lines output vector of lines(cv.CV_32FC2 type). Each line is represented by a two-element vector (ρ,θ) . ρ is the distance from the coordinate origin (0,0). θ is the line rotation angle in radians.
+@param rho distance resolution of the accumulator in pixels.
+@param theta angle resolution of the accumulator in radians.
+@param threshold accumulator threshold parameter. Only those lines are returned that get enough votes (>threshold).
+@param srn for the multi-scale Hough transform, it is a divisor for the distance resolution rho . The coarse accumulator distance resolution is rho and the accurate accumulator resolution is rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these parameters should be positive.
+@param stn for the multi-scale Hough transform, it is a divisor for the distance resolution theta.
+@param min_theta for standard and multi-scale Hough transform, minimum angle to check for lines. Must fall between 0 and max_theta.
+@param max_theta for standard and multi-scale Hough transform, maximum angle to check for lines. Must fall between min_theta and CV_PI.
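+
+A minimal sketch; the only non-obvious step is converting each \f$(\rho, \theta)\f$ pair back to two
+points far apart on the detected line so it can be drawn (canvas ids and thresholds are
+illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let edges = new cv.Mat();
+let lines = new cv.Mat();
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(gray, edges, 50, 200, 3, false);           // binarize first, as noted above
+cv.HoughLines(edges, lines, 1, Math.PI / 180, 30, 0, 0, 0, Math.PI);
+for (let i = 0; i < lines.rows; ++i) {
+    let rho = lines.data32F[i * 2];
+    let theta = lines.data32F[i * 2 + 1];
+    let a = Math.cos(theta);
+    let b = Math.sin(theta);
+    let x0 = a * rho;                               // foot of the perpendicular from the origin
+    let y0 = b * rho;
+    let startPoint = {x: x0 - 1000 * b, y: y0 + 1000 * a};
+    let endPoint = {x: x0 + 1000 * b, y: y0 - 1000 * a};
+    cv.line(dst, startPoint, endPoint, [255, 0, 0, 255]);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); edges.delete(); lines.delete(); dst.delete();
+@endcode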
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_houghlines_HoughLines.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Probabilistic Hough Transform
+-----------------------------
+
+In the hough transform, you can see that even for a line with two arguments, it takes a lot of
+computation. The Probabilistic Hough Transform is an optimization of the Hough Transform we saw. It
+doesn't take all the points into consideration. Instead, it takes only a random subset of points,
+which is sufficient for line detection. We just have to decrease the threshold. See the image below,
+which compares the Hough Transform and the Probabilistic Hough Transform in Hough space. (Image
+Courtesy: [Franck Bettinger's home page](http://phdfb1.free.fr/robot/mscthesis/node14.html) )
+
+![image](images/houghlines4.png)
+
+The OpenCV implementation is based on Robust Detection of Lines Using the Progressive Probabilistic
+Hough Transform by Matas, J., Galambos, C. and Kittler, J.V. @cite Matas00.
+
+We use the function: **cv.HoughLinesP (image, lines, rho, theta, threshold, minLineLength = 0, maxLineGap = 0)**
+
+@param image 8-bit, single-channel binary source image. The image may be modified by the function.
+@param lines output vector of lines(cv.CV_32SC4 type). Each line is represented by a 4-element vector (x1,y1,x2,y2), where (x1,y1) and (x2,y2) are the ending points of each detected line segment.
+@param rho distance resolution of the accumulator in pixels.
+@param theta angle resolution of the accumulator in radians.
+@param threshold accumulator threshold parameter. Only those lines are returned that get enough votes (>threshold).
+@param minLineLength minimum line length. Line segments shorter than that are rejected.
+@param maxLineGap maximum allowed gap between points on the same line to link them.
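+
+A minimal sketch (canvas ids and the threshold are illustrative; the input is assumed to be
+effectively binary already, otherwise threshold it or run Canny first):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let lines = new cv.Mat();
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.HoughLinesP(gray, lines, 1, Math.PI / 180, 2, 0, 0);
+// each detected segment is stored as (x1, y1, x2, y2) in lines.data32S
+for (let i = 0; i < lines.rows; ++i) {
+    let startPoint = new cv.Point(lines.data32S[i * 4], lines.data32S[i * 4 + 1]);
+    let endPoint = new cv.Point(lines.data32S[i * 4 + 2], lines.data32S[i * 4 + 3]);
+    cv.line(dst, startPoint, endPoint, [255, 0, 0, 255]);
+}
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); lines.delete(); dst.delete();
+@endcode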
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_houghlines_HoughLinesP.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Processing for Video Capture {#tutorial_js_imgproc_camera}
+==================================
+
+Goal
+----
+
+- Learn image processing for video capture.
+
+
+\htmlonly
+<iframe src="../../js_imgproc_camera.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Morphological Transformations {#tutorial_js_morphological_ops}
+=============================
+
+Goal
+----
+
+- We will learn different morphological operations like Erosion, Dilation, Opening, Closing
+ etc.
+- We will learn different functions like : **cv.erode()**, **cv.dilate()**,
+ **cv.morphologyEx()** etc.
+
+Theory
+------
+
+Morphological transformations are some simple operations based on the image shape. They are
+normally performed on binary images. They need two inputs: one is our original image, the second is
+called the **structuring element** or **kernel**, which decides the nature of the operation. Two
+basic morphological operators are Erosion and Dilation. Then its variant forms like Opening,
+Closing, Gradient etc. also come into play. We will see them one-by-one with the help of the
+following image:
+
+![image](shape.jpg)
+
+### 1. Erosion
+
+The basic idea of erosion is just like soil erosion: it erodes away the boundaries of the
+foreground object (always try to keep the foreground in white). So what does it do? The kernel
+slides through the image (as in 2D convolution). A pixel in the original image (either 1 or 0) will
+be considered 1 only if all the pixels under the kernel are 1; otherwise it is eroded (made zero).
+
+So what happens is that all the pixels near the boundary will be discarded depending upon the size
+of the kernel. So the thickness or size of the foreground object decreases, or simply the white
+region decreases in the image. It is useful for removing small white noise (as we have seen in the
+colorspace chapter), detaching two connected objects, etc.
+
+We use the function: **cv.erode (src, dst, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src input image; the number of channels can be arbitrary, but the depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F.
+@param dst output image of the same size and type as src.
+@param kernel structuring element used for erosion.
+@param anchor position of the anchor within the element; default value new cv.Point(-1, -1) means that the anchor is at the element center.
+@param iterations number of times erosion is applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border
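+
+A minimal sketch with a 5x5 rectangular kernel (canvas ids are illustrative); **cv.dilate()** below
+takes exactly the same arguments:
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let kernel = cv.Mat.ones(5, 5, cv.CV_8U);   // 5x5 rectangular structuring element
+let anchor = new cv.Point(-1, -1);
+cv.erode(src, dst, kernel, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); kernel.delete();
+@endcode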
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_erode.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 2. Dilation
+
+It is just the opposite of erosion. Here, a pixel element is '1' if at least one pixel under the
+kernel is '1'. So it increases the white region in the image, or the size of the foreground object
+increases. Normally, in cases like noise removal, erosion is followed by dilation. Because erosion
+removes white noise, but it also shrinks our object, we dilate it. Since the noise is gone, it
+won't come back, but our object area increases. It is also useful in joining broken parts of an
+object.
+
+We use the function: **cv.dilate (src, dst, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src input image; the number of channels can be arbitrary, but the depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F.
+@param dst output image of the same size and type as src.
+@param kernel structuring element used for dilation.
+@param anchor position of the anchor within the element; default value new cv.Point(-1, -1) means that the anchor is at the element center.
+@param iterations number of times dilation is applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_dilate.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 3. Opening
+
+Opening is just another name of **erosion followed by dilation**. It is useful in removing noise.
+
+We use the function: **cv.morphologyEx (src, dst, op, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src source image. The number of channels can be arbitrary. The depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F
+@param dst destination image of the same size and type as source image.
+@param op type of a morphological operation, (see cv.MorphTypes).
+@param kernel structuring element. It can be created using cv.getStructuringElement.
+@param anchor anchor position with the kernel. Negative values mean that the anchor is at the kernel center.
+@param iterations number of times erosion and dilation are applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border. The default value has a special meaning.
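+
+A minimal sketch of opening (canvas ids are illustrative). The variants in the following sections
+only change the `op` value: cv.MORPH_CLOSE, cv.MORPH_GRADIENT, cv.MORPH_TOPHAT and
+cv.MORPH_BLACKHAT.
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let kernel = cv.Mat.ones(5, 5, cv.CV_8U);
+let anchor = new cv.Point(-1, -1);
+cv.morphologyEx(src, dst, cv.MORPH_OPEN, kernel, anchor, 1,
+                cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); kernel.delete();
+@endcode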
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_opening.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 4. Closing
+
+Closing is the reverse of Opening, **Dilation followed by Erosion**. It is useful in closing small holes
+inside the foreground objects, or small black points on the object.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_closing.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 5. Morphological Gradient
+
+It is the difference between dilation and erosion of an image.
+
+The result will look like the outline of the object.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_gradient.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 6. Top Hat
+
+It is the difference between input image and Opening of the image.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_topHat.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+### 7. Black Hat
+
+It is the difference between the closing of the input image and input image.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_blackHat.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Structuring Element
+-------------------
+
+We manually created structuring elements in the previous examples with the help of cv.Mat.ones.
+They are rectangular. But in some cases, you may need elliptical/circular shaped kernels. For this
+purpose, OpenCV has a function, **cv.getStructuringElement()**. You just pass the shape and size of
+the kernel, and you get the desired kernel.
+
+We use the function: **cv.getStructuringElement (shape, ksize, anchor = new cv.Point(-1, -1))**
+@param shape element shape that could be one of cv.MorphShapes
+@param ksize size of the structuring element.
+@param anchor anchor position within the element. The default value [−1,−1] means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted.
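+
+For instance, a sketch of building an elliptical kernel instead of the rectangular cv.Mat.ones
+used above:
+
+@code{.js}
+let kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(11, 11));
+// kernel is an 11x11 cv.CV_8U matrix whose ones approximate a filled ellipse
+kernel.delete();
+@endcode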
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_morphological_ops_getStructuringElement.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Pyramids {#tutorial_js_pyramids}
+==============
+
+Goal
+----
+
+- We will learn about Image Pyramids
+- We will learn these functions: **cv.pyrUp()**, **cv.pyrDown()**
+
+Theory
+------
+
+Normally, we used to work with an image of constant size. But on some occasions, we need to work
+with (the same) image at different resolutions. For example, while searching for something in an
+image, like a face, we are not sure at what size the object will be present in the image. In that
+case, we will need to create a set of images at different resolutions and search for the object in
+all of them. This set of images at different resolutions is called an Image Pyramid (because when
+they are kept in a stack, with the biggest image at the bottom and the smallest at the top, it
+looks like a pyramid).
+
+There are two kinds of Image Pyramids: 1) Gaussian Pyramids and 2) Laplacian Pyramids.
+
+A higher level (low resolution) in a Gaussian Pyramid is formed by removing consecutive rows and
+columns in the lower level (higher resolution) image. Then each pixel in the higher level is formed
+by the contribution from 5 pixels in the underlying level with gaussian weights. By doing so, an
+\f$M \times N\f$ image becomes an \f$M/2 \times N/2\f$ image. So the area reduces to one-fourth of the
+original area. It is called an Octave. The same pattern continues as we go higher in the pyramid
+(i.e., resolution decreases). Similarly, while expanding, the area becomes 4 times larger at each
+level. We can build Gaussian pyramids using the **cv.pyrDown()** and **cv.pyrUp()** functions.
+
+Laplacian Pyramids are formed from the Gaussian Pyramids. There is no exclusive function for that.
+Laplacian pyramid images are like edge images; most of their elements are zeros. They are used in
+image compression. A level in a Laplacian Pyramid is formed by the difference between that level in
+the Gaussian Pyramid and the expanded version of its upper level in the Gaussian Pyramid.
+
+Downsample
+------
+
+We use the function: **cv.pyrDown (src, dst, dstsize = new cv.Size(0, 0), borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image; it has the specified size and the same type as src.
+@param dstsize size of the output image.
+@param borderType pixel extrapolation method(see cv.BorderTypes, cv.BORDER_CONSTANT isn't supported).
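+
+A minimal sketch (canvas ids are illustrative); each call halves the width and height, i.e. goes
+one octave down, and **cv.pyrUp()** below is called the same way:
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.pyrDown(src, dst, new cv.Size(0, 0), cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode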
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_pyramids_pyrDown.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Upsample
+------
+
+We use the function: **cv.pyrUp (src, dst, dstsize = new cv.Size(0, 0), borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image; it has the specified size and the same type as src.
+@param dstsize size of the output image.
+@param borderType pixel extrapolation method(see cv.BorderTypes, only cv.BORDER_DEFAULT is supported).
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_pyramids_pyrUp.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Processing {#tutorial_js_table_of_contents_imgproc}
+==========================
+
+- @subpage tutorial_js_colorspaces
+
+ Learn how to change images between different color spaces.
+
+- @subpage tutorial_js_geometric_transformations
+
+ Learn how to apply different geometric transformations to images like rotation, translation etc.
+
+- @subpage tutorial_js_thresholding
+
+    Learn how to convert images to binary images using global thresholding, adaptive thresholding,
+    Otsu's binarization etc.
+
+- @subpage tutorial_js_filtering
+
+    Learn how to blur the images, filter the images with custom kernels etc.
+
+- @subpage tutorial_js_morphological_ops
+
+    Learn about morphological transformations like Erosion, Dilation, Opening, Closing etc.
+
+- @subpage tutorial_js_gradients
+
+    Learn how to find image gradients, edges etc.
+
+- @subpage tutorial_js_canny
+
+    Learn how to find edges with Canny Edge Detection.
+
+- @subpage tutorial_js_pyramids
+
+    Learn about image pyramids and how to use them for image blending.
+
+- @subpage tutorial_js_table_of_contents_contours
+
+    Learn about Contours in OpenCV.js.
+
+- @subpage tutorial_js_table_of_contents_histograms
+
+    Learn about histograms in OpenCV.js.
+
+- @subpage tutorial_js_table_of_contents_transforms
+
+    Learn different Image Transforms in OpenCV.js like Fourier Transform, Cosine Transform etc.
+
+- @subpage tutorial_js_template_matching
+
+    Learn how to search for an object in an image using Template Matching.
+
+- @subpage tutorial_js_houghlines
+
+ Learn how to detect lines in an image.
+
+- @subpage tutorial_js_houghcircles
+
+ Learn how to detect circles in an image.
+
+- @subpage tutorial_js_watershed
+
+ Learn how to segment images with watershed segmentation.
+
+- @subpage tutorial_js_grabcut
+
+ Learn how to extract foreground with GrabCut algorithm.
+
+- @subpage tutorial_js_imgproc_camera
+
+ Learn image processing for video capture.
--- /dev/null
+Template Matching {#tutorial_js_template_matching}
+=================
+
+Goals
+-----
+
+- To find objects in an image using Template Matching
+- You will learn these functions : **cv.matchTemplate()**, **cv.minMaxLoc()**
+
+Theory
+------
+
+Template Matching is a method for searching and finding the location of a template image in a larger
+image. OpenCV comes with a function **cv.matchTemplate()** for this purpose. It simply slides the
+template image over the input image (as in 2D convolution) and compares the template and the patch of
+the input image under the template image. Several comparison methods are implemented in OpenCV (you
+can check the docs for more details). It returns a grayscale image, where each pixel denotes how well
+the neighbourhood of that pixel matches the template.
+
+If the input image is of size (WxH) and the template image is of size (wxh), the output image will
+have a size of (W-w+1, H-h+1). Once you get the result, you can use the **cv.minMaxLoc()** function
+to find the location of the maximum/minimum value. Take it as the top-left corner of the rectangle
+and take (w,h) as the width and height of the rectangle. That rectangle is your region of template.
+
+@note If you are using cv.TM_SQDIFF as comparison method, minimum value gives the best match.
+
+Template Matching in OpenCV
+---------------------------
+
+We use the function: **cv.matchTemplate (image, templ, result, method, mask = new cv.Mat())**
+
+@param image image where the search is running. It must be 8-bit or 32-bit floating-point.
+@param templ searched template. It must be not greater than the source image and have the same data type.
+@param result map of comparison results. It must be single-channel 32-bit floating-point.
+@param method parameter specifying the comparison method(see cv.TemplateMatchModes).
+@param mask mask of searched template. It must have the same datatype and size with templ. It is not set by default.
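+
+A minimal sketch that marks the best match with a rectangle (the two input canvas ids are
+illustrative):
+
+@code{.js}
+let src = cv.imread('imageCanvasInput');
+let templ = cv.imread('templateCanvasInput');
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+cv.matchTemplate(src, templ, dst, cv.TM_CCOEFF, mask);
+let result = cv.minMaxLoc(dst, mask);
+// for cv.TM_CCOEFF the best match is the maximum; use result.minLoc for cv.TM_SQDIFF
+let maxPoint = result.maxLoc;
+let point = new cv.Point(maxPoint.x + templ.cols, maxPoint.y + templ.rows);
+cv.rectangle(src, maxPoint, point, new cv.Scalar(255, 0, 0, 255), 2, cv.LINE_8, 0);
+cv.imshow('canvasOutput', src);
+src.delete(); templ.delete(); dst.delete(); mask.delete();
+@endcode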
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_template_matching_matchTemplate.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Thresholding {#tutorial_js_thresholding}
+==================
+
+Goal
+----
+
+- In this tutorial, you will learn Simple thresholding, Adaptive thresholding, Otsu's thresholding
+ etc.
+- You will learn these functions : **cv.threshold**, **cv.adaptiveThreshold** etc.
+
+Simple Thresholding
+-------------------
+
+Here, the matter is straightforward. If the pixel value is greater than a threshold value, it is
+assigned one value (maybe white), else it is assigned another value (maybe black).
+
+We use the function: **cv.threshold (src, dst, thresh, maxval, type)**
+@param src input array.
+@param dst output array of the same size and type and the same number of channels as src.
+@param thresh threshold value.
+@param maxval maximum value to use with the cv.THRESH_BINARY and cv.THRESH_BINARY_INV thresholding types.
+@param type thresholding type(see cv.ThresholdTypes).
+
+**thresholding type** - OpenCV provides different styles of thresholding, decided by the last
+parameter of the function. The different types are:
+
+- cv.THRESH_BINARY
+- cv.THRESH_BINARY_INV
+- cv.THRESH_TRUNC
+- cv.THRESH_TOZERO
+- cv.THRESH_OTSU
+- cv.THRESH_TRIANGLE
+
+@note Input image should be single channel only in case of cv.THRESH_OTSU or cv.THRESH_TRIANGLE flags
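+
+A minimal sketch (canvas ids and the threshold values are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// pixels above 120 become 200, the rest become 0
+cv.threshold(src, dst, 120, 200, cv.THRESH_BINARY);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode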
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_thresholding_threshold.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Adaptive Thresholding
+---------------------
+
+In the previous section, we used a global value as the threshold value. But it may not be good in
+all conditions, e.g. where an image has different lighting conditions in different areas. In that
+case, we go for adaptive thresholding. Here, the algorithm calculates the threshold for small
+regions of the image. So we get different thresholds for different regions of the same image, and it
+gives us better results for images with varying illumination.
+
+We use the function: **cv.adaptiveThreshold (src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C)**
+@param src source 8-bit single-channel image.
+@param dst destination image of the same size and the same type as src.
+@param maxValue non-zero value assigned to the pixels for which the condition is satisfied
+@param adaptiveMethod adaptive thresholding algorithm to use.
+@param thresholdType thresholding type that must be either cv.THRESH_BINARY or cv.THRESH_BINARY_INV.
+@param blockSize size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
+@param C constant subtracted from the mean or weighted mean (see the details below). Normally, it is positive but may be zero or negative as well.
+
+**adaptiveMethod** - It decides how thresholding value is calculated:
+ - cv.ADAPTIVE_THRESH_MEAN_C
+ - cv.ADAPTIVE_THRESH_GAUSSIAN_C
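+
+A minimal sketch (canvas ids and parameter values are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+let dst = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);  // source must be 8-bit single-channel
+cv.adaptiveThreshold(gray, dst, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 3, 2);
+cv.imshow('canvasOutput', dst);
+src.delete(); gray.delete(); dst.delete();
+@endcode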
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_thresholding_adaptiveThreshold.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Fourier Transform {#tutorial_js_fourier_transform}
+=================
+
+Goal
+----
+
+- To find the Fourier Transform of images using OpenCV
+- Some applications of Fourier Transform
+- We will learn the following functions: **cv.dft()** etc.
+
+Theory
+------
+
+Fourier Transform is used to analyze the frequency characteristics of various filters. For images,
+**2D Discrete Fourier Transform (DFT)** is used to find the frequency domain. A fast algorithm
+called **Fast Fourier Transform (FFT)** is used for calculation of DFT. Details about these can be
+found in any image processing or signal processing textbooks.
+
+For a sinusoidal signal, \f$x(t) = A \sin(2 \pi ft)\f$, we can say \f$f\f$ is the frequency of the signal,
+and if its frequency domain is taken, we can see a spike at \f$f\f$. If the signal is sampled to form a
+discrete signal, we get the same frequency domain, but it is periodic in the range \f$[- \pi, \pi]\f$ or
+\f$[0,2\pi]\f$ (or \f$[0,N]\f$ for an N-point DFT). You can consider an image as a signal which is sampled
+in two directions. So taking the fourier transform in both the X and Y directions gives you the
+frequency representation of the image.
+
+More intuitively, for the sinusoidal signal, if the amplitude varies very fast in a short time, you
+can say it is a high frequency signal. If it varies slowly, it is a low frequency signal. You can
+extend the same idea to images. Where does the amplitude vary drastically in images? At edge points
+or noise. So we can say edges and noise are high frequency content in an image. If there are no
+major changes in amplitude, it is a low frequency component.
+
+The performance of a DFT calculation is better for some array sizes. It is fastest when the array
+size is a power of two. Arrays whose size is a product of 2's, 3's, and 5's are also processed
+quite efficiently. So if you are worried about the performance of your code, you can modify the
+size of the array to an optimal size (by padding zeros) before finding the DFT. OpenCV provides the
+function **cv.getOptimalDFTSize()** for this.
+
+Now we will see how to find the Fourier Transform.
+
+Fourier Transform in OpenCV
+---------------------------
+
+We use the functions: **cv.dft (src, dst, flags = 0, nonzeroRows = 0)**
+
+@param src input array that could be real or complex.
+@param dst output array whose size and type depend on the flags.
+@param flags transformation flags, representing a combination of the cv.DftFlags
+@param nonzeroRows when the parameter is not zero, the function assumes that only the first nonzeroRows rows of the input array (DFT_INVERSE is not set) or only the first nonzeroRows of the output array (DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the rows more efficiently and save some time; this technique is very useful for calculating array cross-correlation or convolution using DFT.
+
+**cv.getOptimalDFTSize (vecsize)**
+
+@param vecsize vector size.
+
+**cv.copyMakeBorder (src, dst, top, bottom, left, right, borderType, value = new cv.Scalar())**
+
+@param src source image.
+@param dst output image of the same type as src and of size Size(src.cols+left+right, src.rows+top+bottom).
+@param top parameter specifying how many top pixels in each direction from the source image rectangle to extrapolate.
+@param bottom parameter specifying how many bottom pixels in each direction from the source image rectangle to extrapolate.
+@param left parameter specifying how many left pixels in each direction from the source image rectangle to extrapolate.
+@param right parameter specifying how many right pixels in each direction from the source image rectangle to extrapolate.
+@param borderType border type.
+@param value border value if borderType == cv.BORDER_CONSTANT.
+
+**cv.magnitude (x, y, magnitude)**
+
+@param x floating-point array of x-coordinates of the vectors.
+@param y floating-point array of y-coordinates of the vectors; it must have the same size as x.
+@param magnitude output array of the same size and type as x.
+
+**cv.split (m, mv)**
+
+@param m input multi-channel array.
+@param mv output vector of arrays; the arrays themselves are reallocated, if needed.
+
+**cv.merge (mv, dst)**
+
+@param mv input vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.
+@param dst output array of the same size and the same depth as mv[0]; The number of channels will be the total number of channels in the matrix array.
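+
+A condensed sketch that ties these functions together: pad to an optimal size, run the DFT and
+display the (log-scaled) magnitude spectrum (canvas ids are illustrative):
+
+@code{.js}
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+// pad the image to a size the FFT handles fastest
+let optimalRows = cv.getOptimalDFTSize(src.rows);
+let optimalCols = cv.getOptimalDFTSize(src.cols);
+let padded = new cv.Mat();
+cv.copyMakeBorder(src, padded, 0, optimalRows - src.rows, 0, optimalCols - src.cols,
+                  cv.BORDER_CONSTANT, new cv.Scalar(0, 0, 0, 0));
+// build a two-channel (real, imaginary) matrix and transform it in place
+let plane0 = new cv.Mat();
+padded.convertTo(plane0, cv.CV_32F);
+let plane1 = cv.Mat.zeros(padded.rows, padded.cols, cv.CV_32F);
+let planes = new cv.MatVector();
+planes.push_back(plane0);
+planes.push_back(plane1);
+let complexI = new cv.Mat();
+cv.merge(planes, complexI);
+cv.dft(complexI, complexI);
+// magnitude spectrum: log(1 + sqrt(Re^2 + Im^2)), scaled to [0, 1] for display
+cv.split(complexI, planes);
+let mag = new cv.Mat();
+cv.magnitude(planes.get(0), planes.get(1), mag);
+let one = cv.Mat.ones(mag.rows, mag.cols, mag.type());
+cv.add(mag, one, mag);
+cv.log(mag, mag);
+cv.normalize(mag, mag, 0, 1, cv.NORM_MINMAX);
+cv.imshow('canvasOutput', mag);
+src.delete(); padded.delete(); plane0.delete(); plane1.delete();
+planes.delete(); complexI.delete(); mag.delete(); one.delete();
+@endcode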
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_fourier_transform_dft.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Image Transforms in OpenCV.js {#tutorial_js_table_of_contents_transforms}
+==========================
+
+- @subpage tutorial_js_fourier_transform
+
+    Learn to find the Fourier Transform of images
--- /dev/null
+Image Segmentation with Watershed Algorithm {#tutorial_js_watershed}
+===========================================
+
+Goal
+----
+
+- We will learn how to use marker-based image segmentation with the watershed algorithm
+- We will learn: **cv.watershed()**
+
+Theory
+------
+
+Any grayscale image can be viewed as a topographic surface, where high intensity denotes peaks and
+hills while low intensity denotes valleys. You start filling every isolated valley (local minima)
+with differently colored water (labels). As the water rises, depending on the peaks (gradients)
+nearby, water from different valleys, obviously with different colors, will start to merge. To avoid
+that, you build barriers in the locations where water merges. You continue the work of filling water
+and building barriers until all the peaks are under water. Then the barriers you created give you
+the segmentation result. This is the "philosophy" behind the watershed. You can visit the [CMM
+webpage on watershed](http://cmm.ensmp.fr/~beucher/wtshed.html) to understand it with the help of
+some animations.
+
+But this approach gives you an oversegmented result due to noise or other irregularities in the
+image. So OpenCV implements a marker-based watershed algorithm where you specify which valley
+points are to be merged and which are not. It is an interactive image segmentation. What we do is
+give different labels to the objects we know. Label the region which we are sure of being the
+foreground or object with one color (or intensity), label the region which we are sure of being
+background or non-object with another color, and finally label the region which we are not sure of
+with 0. That is our marker. Then apply the watershed algorithm. Our marker will be updated with the
+labels we gave, and the boundaries of objects will have a value of -1.
+
+Code
+----
+
+Below we will see an example on how to use the Distance Transform along with watershed to segment
+mutually touching objects.
+
+Consider the coins image below: the coins are touching each other. Even if you threshold it, they
+will still be touching each other.
+
+We start by finding an approximate estimate of the coins. For that, we can use Otsu's
+binarization.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_watershed_threshold.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Now we need to remove any small white noise in the image. For that we can use morphological
+opening. To remove any small holes in the objects, we can use morphological closing. So, now we
+know for sure that the regions near the center of the objects are foreground and the regions far
+away from the objects are background. The only region we are not sure of is the boundary region of
+the coins.
+
+So we need to extract the area which we are sure consists of coins. Erosion removes the boundary
+pixels, so whatever remains, we can be sure is a coin. That would work if the objects were not
+touching each other. But since they are touching each other, another good option is to find the
+distance transform and apply a proper threshold. Next we need to find the area which we are sure
+does not consist of coins. For that, we dilate the result. Dilation extends the object boundary
+into the background. This way, we can make sure that whatever is background in the result is really
+background, since the boundary region has been removed. See the image below.
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_watershed_background.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+The remaining regions are those for which we have no idea whether they are coins or background.
+The watershed algorithm should find them. These areas are normally around the boundaries of coins,
+where foreground and background meet (or even where two different coins meet). We call it the
+border. It can be obtained by subtracting the sure_fg area from the sure_bg area.
+
+We use the function: **cv.distanceTransform (src, dst, distanceType, maskSize, labelType = cv.CV_32F)**
+
+@param src 8-bit, single-channel (binary) source image.
+@param dst output image with calculated distances. It is a 8-bit or 32-bit floating-point, single-channel image of the same size as src.
+@param distanceType type of distance(see cv.DistanceTypes).
+@param maskSize size of the distance transform mask, see (cv.DistanceTransformMasks).
+@param labelType type of output image. It can be cv.CV_8U or cv.CV_32F. Type cv.CV_8U can be used only for the first variant of the function and distanceType == DIST_L1.
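+
+A short sketch, assuming `opening` already holds the binary coins image after morphological
+opening (the variable names here are hypothetical):
+
+@code{.js}
+let dist = new cv.Mat();
+let sureFg = new cv.Mat();
+cv.distanceTransform(opening, dist, cv.DIST_L2, 5);
+cv.normalize(dist, dist, 1, 0, cv.NORM_INF);        // scale distances into [0, 1]
+// keep only pixels far from the boundary: the sure foreground
+cv.threshold(dist, sureFg, 0.7, 255, cv.THRESH_BINARY);
+sureFg.convertTo(sureFg, cv.CV_8U, 1, 0);
+@endcode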
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_watershed_distanceTransform.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+In the thresholded image, we get some regions which we are sure are coins, and they are detached
+now. (In some cases, you may be interested only in foreground segmentation, not in separating the
+mutually touching objects. In that case, you need not use the distance transform; erosion is
+sufficient. Erosion is just another method to extract the sure foreground area, that's all.)
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_watershed_foreground.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Now we know for sure which regions are coins, which are background, and so on. So we create a
+marker (an array of the same size as the original image, but with int32 datatype) and label the
+regions inside it. The regions we know for sure (whether foreground or background) are labelled
+with positive, but different, integers, and the area we don't know for sure is just left as zero.
+For this we use **cv.connectedComponents()**. It labels the background of the image with 0, then
+other objects are labelled with integers starting from 1.
+
+But we know that if the background is marked with 0, watershed will consider it an unknown area. So
+we want to mark it with a different integer. Instead, we will mark the unknown region, defined by
+unknown, with 0.
+
+Now our marker is ready. It is time for final step, apply watershed. Then marker image will be
+modified. The boundary region will be marked with -1.
+
+We use the function: **cv.connectedComponents (image, labels, connectivity = 8, ltype = cv.CV_32S)**
+@param image the 8-bit single-channel image to be labeled.
+@param labels destination labeled image(cv.CV_32SC1 type).
+@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively.
+@param ltype output image label type. Currently cv.CV_32S and cv.CV_16U are supported.
+
+We use the function: **cv.watershed (image, markers)**
+
+@param image input 8-bit 3-channel image.
+@param markers input/output 32-bit single-channel image (map) of markers. It should have the same size as image .
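+
+A sketch of the marker bookkeeping described above, assuming `src` is the RGBA coins image and
+`sureFg` and `unknown` are the 8-bit masks computed earlier (variable names are hypothetical):
+
+@code{.js}
+let markers = new cv.Mat();
+cv.connectedComponents(sureFg, markers);
+for (let i = 0; i < markers.rows; i++) {
+    for (let j = 0; j < markers.cols; j++) {
+        // shift every label up by one so sure background becomes 1 instead of 0 ...
+        markers.intPtr(i, j)[0] = markers.intPtr(i, j)[0] + 1;
+        // ... and reserve 0 for the unknown region that watershed has to decide
+        if (unknown.ucharPtr(i, j)[0] === 255) {
+            markers.intPtr(i, j)[0] = 0;
+        }
+    }
+}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);        // watershed wants a 3-channel image
+cv.watershed(src, markers);
+// boundary pixels are now -1 in markers; paint them red
+for (let i = 0; i < markers.rows; i++) {
+    for (let j = 0; j < markers.cols; j++) {
+        if (markers.intPtr(i, j)[0] === -1) {
+            src.ucharPtr(i, j)[0] = 255;
+            src.ucharPtr(i, j)[1] = 0;
+            src.ucharPtr(i, j)[2] = 0;
+        }
+    }
+}
+cv.imshow('canvasOutput', src);
+markers.delete();
+@endcode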
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_watershed_watershed.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Face Detection using Haar Cascades {#tutorial_js_face_detection}
+==================================
+
+Goal
+----
+
+- Learn the basics of face detection using Haar feature-based cascade classifiers
+- Extend the same for eye detection etc.
+
+Basics
+------
+
+Object Detection using Haar feature-based cascade classifiers is an effective method proposed by Paul Viola and Michael Jones in the 2001 paper, "Rapid Object Detection using a
+Boosted Cascade of Simple Features". It is a machine learning based approach in which a cascade
+function is trained from a lot of positive and negative images. It is then used to detect objects in
+other images.
+
+Here we will work with face detection. Initially, the algorithm needs a lot of positive images
+(images of faces) and negative images (images without faces) to train the classifier. Then we need
+to extract features from them. For this, the Haar features shown in the image below are used. They
+are just like our convolutional kernel. Each feature is a single value obtained by subtracting the
+sum of pixels under the white rectangle from the sum of pixels under the black rectangle.
+
+![image](images/haar_features.jpg)
+
+Now, all possible sizes and locations of each kernel are used to calculate plenty of features. For
+each feature calculation, we need to find the sum of the pixels under the white and black
+rectangles. To solve this, they introduced integral images. They simplify the calculation of the
+sum of the pixels, however large the number of pixels may be, to an operation involving just four
+pixels.
+
+But among all these features we calculated, most are irrelevant. For example, consider the image
+below. The top row shows two good features. The first feature selected seems to focus on the
+property that the region of the eyes is often darker than the region of the nose and cheeks. The
+second feature selected relies on the property that the eyes are darker than the bridge of the
+nose. But the same windows applied to the cheeks or any other place are irrelevant. So how do we
+select the best features out of 160000+ features? It is achieved by **Adaboost**.
+
+![image](images/haar.png)
+
+For this, we apply each and every feature to all the training images. For each feature, it finds
+the best threshold which will classify the faces as positive and negative. Obviously, there will be
+errors or misclassifications. We select the features with the minimum error rate, which means they
+are the features that best classify the face and non-face images. (The process is not as simple as
+this. Each image is given an equal weight in the beginning. After each classification, the weights
+of misclassified images are increased. Then the same process is done again. New error rates are
+calculated, and new weights. The process continues until the required accuracy or error rate is
+achieved or the required number of features is found).
+
+The final classifier is a weighted sum of these weak classifiers. They are called weak because
+alone they can't classify the image, but together with others they form a strong classifier. The
+paper says even 200 features provide detection with 95% accuracy. Their final setup had around
+6000 features. (Imagine a reduction from 160000+ features to 6000 features. That is a big gain).
+
+So now you take an image. Take each 24x24 window. Apply 6000 features to it. Check if it is a face
+or not. Wow.. Wow.. Isn't it a little inefficient and time consuming? Yes, it is. The authors have
+a good solution for that.
+
+In an image, most of the image region is non-face region. So it is a better idea to have a simple
+method to check if a window is not a face region. If it is not, discard it in a single shot, and
+don't process it again. Instead, focus on regions where there can be a face. This way, we spend
+more time checking possible face regions.
+
+For this they introduced the concept of a **Cascade of Classifiers**. Instead of applying all 6000
+features to a window, the features are grouped into different stages of classifiers and applied one
+by one. (Normally the first few stages contain very few features.) If a window fails the first
+stage, discard it; we don't consider the remaining features on it. If it passes, apply the second stage
+of features and continue the process. A window which passes all stages is a face region.
+
+The authors' detector had 6000+ features spread over 38 stages, with 1, 10, 25, 25 and 50 features in
+the first five stages. (The two features in the above image were actually obtained as the best two
+features from Adaboost.) According to the authors, on average, 10 features out of the 6000+ are
+evaluated per sub-window.
+
+This is a simple, intuitive explanation of how Viola-Jones face detection works. Read the paper for
+more details.
+
+Haar-cascade Detection in OpenCV
+--------------------------------
+
+Here we will deal with detection. OpenCV already contains many pre-trained classifiers for faces,
+eyes, smiles, etc. Those XML files are stored in the opencv/data/haarcascades/ folder. Let's create a
+face and eye detector with OpenCV.
+
+We use the function: **detectMultiScale (image, objects, scaleFactor = 1.1, minNeighbors = 3, flags = 0, minSize = new cv.Size(0, 0), maxSize = new cv.Size(0, 0))**
+
+@param image matrix of the type CV_8U containing an image where objects are detected.
+@param objects vector of rectangles where each rectangle contains the detected object. The rectangles may be partially outside the original image.
+@param scaleFactor parameter specifying how much the image size is reduced at each image scale.
+@param minNeighbors parameter specifying how many neighbors each candidate rectangle should have to retain it.
+@param flags parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. It is not used for a new cascade.
+@param minSize minimum possible object size. Objects smaller than this are ignored.
+@param maxSize maximum possible object size. Objects larger than this are ignored. If maxSize == minSize model is evaluated on single scale.
+
+@note Don't forget to delete CascadeClassifier and RectVector!
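+
+Below is a minimal sketch of how these pieces fit together. It assumes `opencv.js` has finished
+loading and that the cascade file `haarcascade_frontalface_default.xml` has already been placed in
+Emscripten's virtual file system (e.g. with `cv.FS_createDataFile`); the canvas ids match the demo
+below:
+
+@code{.js}
+let src = cv.imread('haarCascadeDetectionCanvasInput');
+let gray = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+
+let faces = new cv.RectVector();
+let classifier = new cv.CascadeClassifier();
+// load the pre-trained cascade from the virtual file system
+classifier.load('haarcascade_frontalface_default.xml');
+
+// detect faces and draw a rectangle around each of them
+classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
+for (let i = 0; i < faces.size(); ++i) {
+    let face = faces.get(i);
+    let point1 = new cv.Point(face.x, face.y);
+    let point2 = new cv.Point(face.x + face.width, face.y + face.height);
+    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
+}
+cv.imshow('haarCascadeDetectionCanvasOutput', src);
+
+// free the memory allocated on Emscripten's heap
+src.delete(); gray.delete(); faces.delete(); classifier.delete();
+@endcode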
+
+Try it
+------
+
+Try this demo using the code above. Canvas elements named haarCascadeDetectionCanvasInput and haarCascadeDetectionCanvasOutput have been prepared. Choose an image and
+click `Try it` to see the result. You can change the code in the textbox to investigate more.
+
+\htmlonly
+<iframe src="../../js_face_detection.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Face Detection in Video Capture {#tutorial_js_face_detection_camera}
+==================================
+
+Goal
+----
+
+- learn how to detect faces in video capture.
+
+@note If you don't know how to capture video from camera, please review @ref tutorial_js_video_display.
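+
+The detection itself is the same as in the previous tutorial; the only difference is that it runs on
+every captured frame. Below is a minimal per-frame sketch. It assumes a \<video\> element with id
+`videoInput`, a canvas with id `canvasOutput`, and a `classifier` that has already been created and
+loaded as shown in the previous tutorial:
+
+@code{.js}
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let gray = new cv.Mat();
+let faces = new cv.RectVector();
+
+function processVideo() {
+    cap.read(frame);  // grab the current frame
+    cv.cvtColor(frame, gray, cv.COLOR_RGBA2GRAY, 0);
+    classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
+    for (let i = 0; i < faces.size(); ++i) {
+        let face = faces.get(i);
+        let point1 = new cv.Point(face.x, face.y);
+        let point2 = new cv.Point(face.x + face.width, face.y + face.height);
+        cv.rectangle(frame, point1, point2, [255, 0, 0, 255]);
+    }
+    cv.imshow('canvasOutput', frame);
+    requestAnimationFrame(processVideo);  // schedule the next frame
+}
+requestAnimationFrame(processVideo);
+@endcode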
+
+\htmlonly
+<iframe src="../../js_face_detection_camera.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+Object Detection {#tutorial_js_table_of_contents_objdetect}
+================
+
+- @subpage tutorial_js_face_detection
+
+    Face detection using haar-cascades
+
+- @subpage tutorial_js_face_detection_camera
+
+ Face Detection in Video Capture
--- /dev/null
+Introduction to OpenCV.js and Tutorials {#tutorial_js_intro}
+=======================================
+
+OpenCV
+------
+
+OpenCV was created at Intel in 1999 by **Gary Bradski**. The first release came out in 2000. **Vadim Pisarevsky** joined Gary Bradski to manage Intel's Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle that won the 2005 DARPA Grand Challenge. Later, its active development continued under the support of Willow Garage, with Gary Bradski and Vadim Pisarevsky leading the project. OpenCV now supports a multitude of algorithms related to Computer Vision and Machine Learning and is expanding day by day.
+
+OpenCV supports a wide variety of programming languages such as C++, Python, and Java, and is available on different platforms including Windows, Linux, OS X, Android, and iOS. Interfaces for high-speed GPU operations based on CUDA and OpenCL are also under active development. OpenCV.js brings OpenCV to the open web platform and makes it available to the JavaScript programmer.
+
+OpenCV.js: OpenCV for the JavaScript programmer
+-------------
+
+The web is the most ubiquitous open computing platform. With HTML5 standards implemented in every browser, web applications are able to render online video with HTML5 video tags, capture webcam video via the WebRTC API, and access each pixel of a video frame via the canvas API. With the abundance of available multimedia content, web developers need a wide array of image and vision processing algorithms in JavaScript to build innovative applications. This requirement is even more essential for emerging applications on the web, such as Web Virtual Reality (WebVR) and Augmented Reality (WebAR). All of these use cases demand efficient implementations of computation-intensive vision kernels on the web.
+
+[Emscripten](http://kripken.github.io/emscripten-site) is an LLVM-to-JavaScript compiler. It takes LLVM bitcode - which can be generated from C/C++ using clang - and compiles it into asm.js or WebAssembly that can execute directly inside web browsers. Asm.js is a highly optimizable, low-level subset of JavaScript. Asm.js enables ahead-of-time compilation and optimization in the JavaScript engine that provides near-to-native execution speed. WebAssembly is a new portable, size- and load-time-efficient binary format suitable for compilation to the web. WebAssembly aims to execute at native speed. WebAssembly is currently being designed as an open standard by the W3C.
+
+OpenCV.js is a JavaScript binding for a selected subset of OpenCV functions for the web platform. It allows emerging web applications with multimedia processing to benefit from the wide variety of vision functions available in OpenCV. OpenCV.js leverages Emscripten to compile OpenCV functions into asm.js or WebAssembly targets, and provides a JavaScript API for web applications to access them. Future versions of the library will take advantage of acceleration APIs that are available on the web, such as SIMD and multi-threaded execution.
+
+OpenCV.js was initially created in the Parallel Architectures and Systems Group at the University of California, Irvine (UCI) as a research project funded by Intel Corporation. OpenCV.js was further improved and integrated into the OpenCV project as part of the Google Summer of Code 2017 program.
+
+OpenCV.js Tutorials
+-----------------------
+
+OpenCV introduces a new set of tutorials that will guide you through various functions available in OpenCV.js. **This guide is mainly focused on the OpenCV 3.x version**.
+
+The purpose of the OpenCV.js tutorials is to:
+-# Help with the adoption of OpenCV in web development
+-# Help the web community, developers, and computer vision researchers to interactively access a variety of web-based OpenCV examples and understand specific vision algorithms.
+
+Because OpenCV.js is able to run directly inside the browser, the OpenCV.js tutorial web pages are intuitive and interactive. For example, using the WebRTC API and evaluating JavaScript code allow developers to change the parameters of CV functions and do live CV coding on web pages to see the results in real time.
+
+Prior knowledge of JavaScript and web application development is recommended to understand this guide.
+
+Contributors
+------------
+
+Below is the list of contributors to the OpenCV.js bindings and tutorials.
+
+- Sajjad Taheri (Architect of the initial version and GSoC mentor, University of California, Irvine)
+- Congxiang Pan (GSoC student, Shanghai Jiao Tong University)
+- Gang Song (GSoC student, Shanghai Jiao Tong University)
+- Wenyao Gan (Student intern, Shanghai Jiao Tong University)
+- Mohammad Reza Haghighat (Project initiator & sponsor, Intel Corporation)
+- Ningxin Hu (Students' supervisor, Intel Corporation)
\ No newline at end of file
--- /dev/null
+Build OpenCV.js {#tutorial_js_setup}
+===============================
+
+
+Installing Emscripten
+-----------------------------
+
+[Emscripten](https://github.com/kripken/emscripten) is an LLVM-to-JavaScript compiler. We will use Emscripten to build OpenCV.js.
+
+To install Emscripten, follow the instructions of the [Emscripten SDK](https://kripken.github.io/emscripten-site/docs/getting_started/downloads.html).
+
+For example:
+@code{.bash}
+./emsdk update
+./emsdk install latest
+./emsdk activate latest
+@endcode
+
+@note
+To compile to [WebAssembly](http://webassembly.org), you need to install and activate [Binaryen](https://github.com/WebAssembly/binaryen) with the `emsdk` command. Please refer to the [Developer's Guide](http://webassembly.org/getting-started/developers-guide/) for more details.
+
+After installation, ensure that the `EMSCRIPTEN` environment variable is set up correctly.
+
+For example:
+@code{.bash}
+source ./emsdk_env.sh
+echo ${EMSCRIPTEN}
+@endcode
+
+Obtaining OpenCV Source Code
+--------------------------
+
+You can use the latest stable OpenCV version or you can grab the latest snapshot from our [Git
+repository](https://github.com/opencv/opencv.git).
+
+### Obtaining the Latest Stable OpenCV Version
+
+- Go to our [releases page](http://opencv.org/releases.html).
+- Download the source archive and unpack it.
+
+### Obtaining the Cutting-edge OpenCV from the Git Repository
+
+Launch Git client and clone [OpenCV repository](http://github.com/opencv/opencv).
+
+For example:
+@code{.bash}
+git clone https://github.com/opencv/opencv.git
+@endcode
+
+@note
+It requires `git` installed in your development environment.
+
+Building OpenCV.js from Source
+---------------------------------------
+
+-# To build `opencv.js`, execute the Python script `<opencv_src_dir>/platforms/js/build_js.py <build_dir>`.
+
+ For example, to build in `build_js` directory:
+ @code{.bash}
+ cd opencv
+ python ./platforms/js/build_js.py build_js
+ @endcode
+
+ @note
+ It requires `python` and `cmake` installed in your development environment.
+
+-# The build script builds the asm.js version by default. To build the WebAssembly version, append the `--build_wasm` switch.
+
+    For example, to build the wasm version in the `build_wasm` directory:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_wasm --build_wasm
+ @endcode
+
+-# [optional] To build the documentation, append the `--build_doc` option.
+
+ For example:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_js --build_doc
+ @endcode
+
+ @note
+ It requires `doxygen` installed in your development environment.
+
+-# [optional] To build the tests, append the `--build_test` option.
+
+ For example:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_js --build_test
+ @endcode
+
+    To run the tests, launch a local web server in the \<build_dir\>/bin folder. For example, the Node.js `http-server` package, which serves on `localhost:8080`.
+
+    Navigate your web browser to `http://localhost:8080/tests.html`, which runs the unit tests automatically.
+
+ You can also run tests using Node.js.
+
+ For example:
+ @code{.sh}
+ cd bin
+ npm install
+ node tests.js
+ @endcode
+
+ @note
+ It requires `node` installed in your development environment.
--- /dev/null
+Introduction to OpenCV.js {#tutorial_js_table_of_contents_setup}
+======================
+
+- @subpage tutorial_js_intro
+
+    Introduction to OpenCV.js and the tutorials
+
+- @subpage tutorial_js_usage
+
+ Get started with OpenCV.js
+
+- @subpage tutorial_js_setup
+
+ Build OpenCV.js from source
--- /dev/null
+Using OpenCV.js {#tutorial_js_usage}
+===============================
+
+Steps
+-----
+
+In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
+
+### Create a web page
+
+First, let's create a simple web page that is able to upload an image.
+
+@code{.js}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hello OpenCV.js</title>
+</head>
+<body>
+<h2>Hello OpenCV.js</h2>
+<div>
+ <div class="inputoutput">
+ <img id="imageSrc" alt="No Image" />
+ <div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
+ </div>
+</div>
+<script type="text/javascript">
+let imgElement = document.getElementById("imageSrc")
+let inputElement = document.getElementById("fileInput");
+inputElement.addEventListener("change", (e) => {
+ imgElement.src = URL.createObjectURL(e.target.files[0]);
+}, false);
+</script>
+</body>
+</html>
+@endcode
+
+To run this web page, copy the content above and save it to a local index.html file, then open it in your web browser.
+
+@note It is better practice to use a local web server to host index.html.
+
+### Include OpenCV.js
+
+Set the URL of `opencv.js` to the `src` attribute of the \<script\> tag.
+
+@note For this tutorial, we host `opencv.js` in the same folder as index.html.
+
+Example for synchronous loading:
+@code{.js}
+<script src="opencv.js" type="text/javascript"></script>
+@endcode
+
+You may want to load `opencv.js` asynchronously by adding the `async` attribute to the \<script\> tag. To be notified when `opencv.js` is ready, you can register a callback via the `onload` attribute.
+
+Example for asynchronous loading:
+@code{.js}
+<script async src="opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>
+@endcode
+
+### Use OpenCV.js
+
+Once `opencv.js` is ready, you can access OpenCV objects and functions through the `cv` object.
+
+For example, you can create a cv.Mat from an image with cv.imread.
+
+@note Because image loading is asynchronous, you need to put cv.Mat creation inside the `onload` callback.
+
+@code{.js}
+imgElement.onload = function() {
+ let mat = cv.imread(imgElement);
+}
+@endcode
+
+Many OpenCV functions can be used to process cv.Mat. You can refer to other tutorials, such as @ref tutorial_js_table_of_contents_imgproc, for details.
+
+In this tutorial, we just show a cv.Mat on screen. To show a cv.Mat, you need a canvas element.
+
+@code{.js}
+<canvas id="outputCanvas"></canvas>
+@endcode
+
+You can use cv.imshow to show cv.Mat on the canvas.
+@code{.js}
+cv.imshow("outputCanvas", mat);
+@endcode
+
+Putting all of the steps together, the final index.html is shown below.
+
+@code{.js}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hello OpenCV.js</title>
+</head>
+<body>
+<h2>Hello OpenCV.js</h2>
+<p id="status">OpenCV.js is loading...</p>
+<div>
+ <div class="inputoutput">
+ <img id="imageSrc" alt="No Image" />
+ <div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
+ </div>
+ <div class="inputoutput">
+ <canvas id="canvasOutput" ></canvas>
+ <div class="caption">canvasOutput</div>
+ </div>
+</div>
+<script type="text/javascript">
+let imgElement = document.getElementById('imageSrc');
+let inputElement = document.getElementById('fileInput');
+inputElement.addEventListener('change', (e) => {
+ imgElement.src = URL.createObjectURL(e.target.files[0]);
+}, false);
+
+imgElement.onload = function() {
+ let mat = cv.imread(imgElement);
+ cv.imshow('canvasOutput', mat);
+ mat.delete();
+};
+
+function onOpenCvReady() {
+ document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
+}
+</script>
+<script async src="opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>
+</body>
+</html>
+@endcode
+
+@note You have to call the delete method of cv.Mat to free memory allocated on Emscripten's heap. Please refer to [Memory management of Emscripten](https://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/embind.html#memory-management) for details.
+
+Try it
+------
+\htmlonly
+<iframe src="../../js_setup_usage.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
\ No newline at end of file
--- /dev/null
+OpenCV.js Tutorials {#tutorial_js_root}
+=======================
+- @subpage tutorial_js_table_of_contents_setup
+
+ Learn how to use OpenCV.js inside your web pages!
+
+- @subpage tutorial_js_table_of_contents_gui
+
+    Here you will learn how to read and display images and videos, and create trackbars.
+
+- @subpage tutorial_js_table_of_contents_core
+
+    In this section you will learn some basic operations on images, some mathematical tools, and some data structures.
+
+- @subpage tutorial_js_table_of_contents_imgproc
+
+    In this section you will learn different image processing functions inside OpenCV.
+
+- @subpage tutorial_js_table_of_contents_video
+
+    In this section you will learn different techniques to work with videos, like object tracking.
+
+- @subpage tutorial_js_table_of_contents_objdetect
+
+    In this section you will learn object detection techniques, like face detection.
--- /dev/null
+Background Subtraction {#tutorial_js_bg_subtraction}
+======================
+
+Goal
+----
+
+- We will familiarize ourselves with the background subtraction methods available in OpenCV.js.
+
+Basics
+------
+
+Background subtraction is a major preprocessing step in many vision-based applications. For
+example, consider cases like a visitor counter where a static camera counts the number of visitors
+entering or leaving a room, or a traffic camera extracting information about vehicles. In
+all these cases, you first need to extract the person or vehicles alone. Technically, you need to
+extract the moving foreground from the static background.
+
+If you have an image of the background alone, like an image of the room without visitors or an image
+of the road without vehicles, it is an easy job: just subtract the new image from the background and
+you get the foreground objects alone. But in most cases you may not have such an image, so we need
+to extract the background from whatever images we have. It becomes more complicated when there are
+shadows of the vehicles. Since shadows also move, simple subtraction will mark them as foreground
+too. It complicates things.
+
+OpenCV.js has implemented one algorithm for this purpose, which is very easy to use.
+
+BackgroundSubtractorMOG2
+------------------------
+
+It is a Gaussian Mixture-based Background/Foreground Segmentation Algorithm. It is based on two
+papers by Z. Zivkovic: "Improved adaptive Gaussian mixture model for background subtraction" from 2004
+and "Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
+from 2006. One important feature of this algorithm is that it selects the appropriate number of
+Gaussian distributions for each pixel. It provides better adaptability to varying scenes due to
+illumination changes etc.
+
+While coding, we use the constructor: **cv.BackgroundSubtractorMOG2 (history = 500, varThreshold = 16,
+detectShadows = true)**
+@param history Length of the history.
+@param varThreshold Threshold on the squared distance between the pixel and the sample to decide
+whether a pixel is close to that sample. This parameter does not affect the background update.
+@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
+speed a bit, so if you do not need this feature, set the parameter to false.
+@return instance of cv.BackgroundSubtractorMOG2
+
+Use the **apply (image, fgmask, learningRate = -1)** method to get the foreground mask
+@param image Next video frame. Floating point frame will be used without scaling and should
+be in range [0,255].
+@param fgmask The output foreground mask as an 8-bit binary image.
+@param learningRate The value between 0 and 1 that indicates how fast the background model is learnt.
+A negative parameter value makes the algorithm use an automatically chosen learning rate. 0 means
+that the background model is not updated at all, 1 means that the background model is completely
+reinitialized from the last frame.
+
+@note The instance of cv.BackgroundSubtractorMOG2 should be deleted manually.
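+
+Below is a minimal sketch of the typical per-frame loop. It assumes a \<video\> element with id
+`videoInput` and a canvas with id `canvasOutput`:
+
+@code{.js}
+let video = document.getElementById('videoInput');
+let cap = new cv.VideoCapture(video);
+let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
+let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
+
+function processVideo() {
+    cap.read(frame);           // grab the current frame
+    fgbg.apply(frame, fgmask); // update the model and get the foreground mask
+    cv.imshow('canvasOutput', fgmask);
+    requestAnimationFrame(processVideo);
+}
+requestAnimationFrame(processVideo);
+
+// when done: frame.delete(); fgmask.delete(); fgbg.delete();
+@endcode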
+
+Try it
+------
+
+\htmlonly
+<iframe src="../../js_bg_subtraction.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
--- /dev/null
+Optical Flow {#tutorial_js_lucas_kanade}
+============
+
+Goal
+----
+
+- We will understand the concepts of optical flow and its estimation using Lucas-Kanade
+ method.
+- We will use functions like **cv.calcOpticalFlowPyrLK()** to track feature points in a
+ video.
+
+Optical Flow
+------------
+
+Optical flow is the pattern of apparent motion of image objects between two consecutive frames
+caused by the movement of the object or camera. It is a 2D vector field where each vector is a
+displacement vector showing the movement of points from the first frame to the second. Consider the image
+below (Image Courtesy: [Wikipedia article on Optical
+Flow](http://en.wikipedia.org/wiki/Optical_flow)).
+
+![image](images/optical_flow_basic1.jpg)
+
+It shows a ball moving in 5 consecutive frames. The arrow shows its displacement vector. Optical
+flow has many applications in areas like:
+
+- Structure from Motion
+- Video Compression
+- Video Stabilization ...
+
+Optical flow works on several assumptions:
+
+-# The pixel intensities of an object do not change between consecutive frames.
+-# Neighbouring pixels have similar motion.
+
+Consider a pixel \f$I(x,y,t)\f$ in the first frame (note that a new dimension, time, is added here;
+earlier we were working with images only, so there was no need for time). It moves by distance
+\f$(dx,dy)\f$ in the next frame, taken after time \f$dt\f$. Since those pixels are the same and the
+intensity does not change, we can say,
+
+\f[I(x,y,t) = I(x+dx, y+dy, t+dt)\f]
+
+Now take the first-order Taylor series approximation of the right-hand side:
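+
+\f[I(x+dx, y+dy, t+dt) \approx I(x,y,t) + \frac{\partial I}{\partial x}dx + \frac{\partial I}{\partial y}dy + \frac{\partial I}{\partial t}dt\f]
+
+Cancelling the common term \f$I(x,y,t)\f$ and dividing by \f$dt\f$ gives the following equation: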
+
+\f[f_x u + f_y v + f_t = 0 \;\f]
+
+where:
+
+\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial y}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
+
+The above equation is called the Optical Flow equation. In it, we can find \f$f_x\f$ and \f$f_y\f$; they
+are the image gradients. Similarly, \f$f_t\f$ is the gradient along time. But \f$(u,v)\f$ is unknown. We
+cannot solve this one equation with two unknown variables, so several methods have been proposed to
+solve this problem, and one of them is Lucas-Kanade.
+
+### Lucas-Kanade method
+
+We have seen an assumption before, that all the neighbouring pixels will have similar motion. The
+Lucas-Kanade method takes a 3x3 patch around the point, so all 9 points have the same motion. We
+can find \f$(f_x, f_y, f_t)\f$ for these 9 points. So now our problem becomes solving 9 equations with
+two unknown variables, which is over-determined. A better solution is obtained with the least-squares
+fit method. Below is the final solution, a two-equation, two-unknown problem; solving it gives:
+
+\f[\begin{bmatrix} u \\ v \end{bmatrix} =
+\begin{bmatrix}
+ \sum_{i}{f_{x_i}}^2 & \sum_{i}{f_{x_i} f_{y_i} } \\
+ \sum_{i}{f_{x_i} f_{y_i}} & \sum_{i}{f_{y_i}}^2
+\end{bmatrix}^{-1}
+\begin{bmatrix}
+ - \sum_{i}{f_{x_i} f_{t_i}} \\
+ - \sum_{i}{f_{y_i} f_{t_i}}
+\end{bmatrix}\f]
+
+(Note the similarity of the inverse matrix with the Harris corner detector. It indicates that corners
+are better points to be tracked.)
+
+So, from the user's point of view, the idea is simple: we give some points to track, and we receive
+the optical flow vectors of those points. But again there are some problems. Until now, we were
+dealing with small motions, so the method fails when there is large motion. To deal with this, we
+again use pyramids. When we go up the pyramid, small motions are removed and large motions become
+small motions. So, applying Lucas-Kanade there, we get optical flow along with the scale.
+
+Lucas-Kanade Optical Flow in OpenCV.js
+-----------------------------------
+
+We use the function: **cv.calcOpticalFlowPyrLK (prevImg, nextImg, prevPts, nextPts, status, err, winSize =
+new cv.Size(21, 21), maxLevel = 3, criteria = new cv.TermCriteria(cv.TermCriteria_COUNT+
+cv.TermCriteria_EPS, 30, 0.01), flags = 0, minEigThreshold = 1e-4)**.
+@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
+@param nextImg second input image or pyramid of the same size and the same type as prevImg.
+@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must
+be single-precision floating-point numbers.
+@param nextPts output vector of 2D points (with single-precision floating-point coordinates)
+containing the calculated new positions of input features in the second image; when the
+cv.OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
+@param status output status vector (of unsigned chars); each element of the vector is set to 1
+if the flow for the corresponding features has been found, otherwise, it is set to 0.
+@param err output vector of errors; each element of the vector is set to an error for the
+corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
+found then the error is not defined (use the status parameter to find such cases).
+@param winSize size of the search window at each pyramid level.
+@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
+level), if set to 1, two levels are used, and so on; if pyramids are passed to input then algorithm
+will use as many levels as pyramids have but no more than maxLevel.
+@param criteria parameter specifying the termination criteria of the iterative search algorithm
+(after the specified maximum number of iterations criteria.maxCount or when the search window moves
+by less than criteria.epsilon).
+@param flags operation flags:
+- cv.OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in nextPts; if the flag is not set,
+then prevPts is copied to nextPts and is considered the initial estimate.
+- cv.OPTFLOW_LK_GET_MIN_EIGENVALS use minimum eigen values as an error measure (see minEigThreshold
+description); if the flag is not set, then the L1 distance between patches around the original and a
+moved point, divided by the number of pixels in a window, is used as an error measure.
+@param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
+optical flow equations, divided by the number of pixels in a window; if this value is less than
+minEigThreshold, then the corresponding feature is filtered out and its flow is not processed, which
+allows removing bad points and getting a performance boost.
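+
+Below is a minimal sketch of sparse tracking with this function. It assumes `prevGray` and `nextGray`
+are two already-loaded single-channel cv.Mat frames of the same size:
+
+@code{.js}
+// find good corners to track in the first frame
+let prevPts = new cv.Mat();
+let none = new cv.Mat();
+cv.goodFeaturesToTrack(prevGray, prevPts, 30, 0.3, 7, none, 7, false, 0.04);
+
+// track them into the second frame
+let nextPts = new cv.Mat();
+let status = new cv.Mat();
+let err = new cv.Mat();
+let winSize = new cv.Size(15, 15);
+let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03);
+cv.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err, winSize, 2, criteria);
+
+// keep only the points whose flow was found (status === 1)
+for (let i = 0; i < status.rows; i++) {
+    if (status.data[i] === 1) {
+        let x = nextPts.data32F[i * 2];
+        let y = nextPts.data32F[i * 2 + 1];
+        // ... draw or store the tracked point (x, y)
+    }
+}
+prevPts.delete(); nextPts.delete(); status.delete(); err.delete(); none.delete();
+@endcode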
+
+### Try it
+
+\htmlonly
+<iframe src="../../js_optical_flow_lucas_kanade.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+(This code doesn't check how correct the next keypoints are. So even if a feature point disappears
+from the image, there is a chance that optical flow finds a next point which may look close to it. So,
+for robust tracking, corner points should actually be re-detected at regular intervals.)
+
+Dense Optical Flow in OpenCV.js
+-------------------------------
+
+The Lucas-Kanade method computes optical flow for a sparse feature set (in our example, corners
+detected using the Shi-Tomasi algorithm). OpenCV.js provides another algorithm to find the dense
+optical flow. It computes the optical flow for all the points in the frame. It is based on Gunnar
+Farneback's algorithm, which is explained in "Two-Frame Motion Estimation Based on Polynomial
+Expansion" by Gunnar Farneback in 2003.
+
+We use the function: **cv.calcOpticalFlowFarneback (prev, next, flow, pyrScale, levels, winsize,
+iterations, polyN, polySigma, flags)**
+@param prev first 8-bit single-channel input image.
+@param next second input image of the same size and the same type as prev.
+@param flow computed flow image that has the same size as prev and type CV_32FC2.
+@param pyrScale parameter specifying the image scale (<1) to build pyramids for each image;
+pyrScale=0.5 means a classical pyramid, where each next layer is half the size of the previous one.
+@param levels number of pyramid layers including the initial image; levels=1 means that no extra
+layers are created and only the original images are used.
+@param winsize averaging window size; larger values increase the algorithm robustness to image noise
+and give more chances for fast motion detection, but yield more blurred motion field.
+@param iterations number of iterations the algorithm does at each pyramid level.
+@param polyN size of the pixel neighborhood used to find polynomial expansion in each pixel; larger
+values mean that the image will be approximated with smoother surfaces, yielding a more robust
+algorithm and a more blurred motion field; typically polyN = 5 or 7.
+@param polySigma standard deviation of the Gaussian that is used to smooth derivatives used as a
+basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1, for polyN=7, a good
+value would be polySigma=1.5.
+@param flags operation flags that can be a combination of the following:
+- cv.OPTFLOW_USE_INITIAL_FLOW uses the input flow as an initial flow approximation.
+- cv.OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian winsize x winsize filter instead of a box filter of
+the same size for optical flow estimation; usually, this option gives a more accurate flow than with
+a box filter, at the cost of lower speed; normally, winsize for a Gaussian window should be set to a
+larger value to achieve the same level of robustness.
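+
+Below is a minimal sketch, again assuming `prevGray` and `nextGray` are single-channel frames of the
+same size:
+
+@code{.js}
+let flow = new cv.Mat();
+cv.calcOpticalFlowFarneback(prevGray, nextGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+
+// flow is CV_32FC2: the (u, v) displacement of every pixel, e.g. at row y, column x:
+let y = 0, x = 0;
+let u = flow.floatPtr(y, x)[0];
+let v = flow.floatPtr(y, x)[1];
+flow.delete();
+@endcode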
+
+### Try it
+
+\htmlonly
+<iframe src="../../js_optical_flow_dense.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
--- /dev/null
+Meanshift and Camshift {#tutorial_js_meanshift}
+======================
+
+Goal
+----
+
+- We will learn about Meanshift and Camshift algorithms to find and track objects in videos.
+
+Meanshift
+---------
+
+The intuition behind meanshift is simple. Consider that you have a set of points. (It can be a pixel
+distribution like a histogram backprojection.) You are given a small window (maybe a circle) and you
+have to move that window to the area of maximum pixel density (or maximum number of points). It is
+illustrated in the simple image given below:
+
+![image](images/meanshift_basics.jpg)
+
+The initial window is shown as the blue circle named "C1". Its original center is marked by the blue
+rectangle named "C1_o". But if you find the centroid of the points inside that window, you will
+get the point "C1_r" (marked by the small blue circle), which is the real centroid of the window. Surely
+they don't match. So move your window such that the center of the new window matches the previous
+centroid. Again find the new centroid. Most probably, it won't match. So move it again, and continue
+the iterations until the center of the window and its centroid fall on the same location (or within a
+small desired error). So finally what you obtain is a window with maximum pixel distribution. It is
+marked with the green circle named "C2". As you can see in the image, it has the maximum number of
+points. The whole process is demonstrated on a static image below:
+
+![image](images/meanshift_face.gif)
+
+So we normally pass the histogram backprojected image and the initial target location. When the object
+moves, obviously the movement is reflected in the histogram backprojected image. As a result, the
+meanshift algorithm moves our window to the new location with maximum density.
+
+### Meanshift in OpenCV.js
+
+To use meanshift in OpenCV.js, first we need to set up the target and find its histogram so that we
+can backproject the target on each frame for the meanshift calculation. We also need to provide the
+initial location of the window. For the histogram, only Hue is considered here. Also, to avoid false
+values due to low light, low-light values are discarded using the **cv.inRange()** function.
+
+We use the function: **cv.meanShift (probImage, window, criteria)**
+@param probImage Back projection of the object histogram. See cv.calcBackProject for details.
+@param window Initial search window.
+@param criteria Stop criteria for the iterative search algorithm.
+@return number of iterations meanShift took to converge and the new location
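+
+Below is a minimal sketch of the per-frame part of this procedure. It assumes `roiHist` and
+`trackWindow` were set up from the first frame as described above, and that `frame` is the current
+RGBA cv.Mat:
+
+@code{.js}
+// backproject the target histogram onto the current frame
+let hsv = new cv.Mat();
+cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
+cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
+let dst = new cv.Mat();
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv);
+cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
+
+// move the window to the area of maximum density
+let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
+[, trackWindow] = cv.meanShift(dst, trackWindow, criteria);
+
+hsv.delete(); dst.delete(); hsvVec.delete();
+@endcode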
+
+### Try it
+
+\htmlonly
+<iframe src="../../js_meanshift.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Camshift
+--------
+
+Did you closely watch the last result? There is a problem. Our window always has the same size,
+whether the object is farther away or very close to the camera. That is not good. We need to adapt
+the window size to the size and rotation of the target. Once again, the solution came from "OpenCV
+Labs" and it is called CAMshift (Continuously Adaptive Meanshift), published by Gary Bradski in his
+paper "Computer Vision Face Tracking for Use in a Perceptual User Interface" in 1998.
+
+It applies meanshift first. Once meanshift converges, it updates the size of the window as
+\f$s = 2 \times \sqrt{\frac{M_{00}}{256}}\f$. It also calculates the orientation of the best-fitting
+ellipse. Then it applies meanshift again with the new scaled search window and the previous window
+location. The process continues until the required accuracy is met.
+
+![image](images/camshift_face.gif)
+
+### Camshift in OpenCV.js
+
+It is almost the same as meanshift, but it returns a rotated rectangle (which is our result) and the
+box parameters (to be passed as the search window in the next iteration).
+
+We use the function: **cv.CamShift (probImage, window, criteria)**
+@param probImage Back projection of the object histogram. See cv.calcBackProject for details.
+@param window Initial search window.
+@param criteria Stop criteria for the iterative search algorithm.
+@return Rotated rectangle and the new search window
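+
+A minimal sketch of the corresponding per-frame call (with `dst`, `trackWindow`, `criteria`, and
+`frame` as in the meanshift sketch above):
+
+@code{.js}
+let rotatedRect;
+[rotatedRect, trackWindow] = cv.CamShift(dst, trackWindow, criteria);
+
+// the rotated rectangle can be drawn via its four corner points
+let pts = cv.rotatedRectPoints(rotatedRect);
+cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
+cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
+cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
+cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
+@endcode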
+
+### Try it
+
+\htmlonly
+<iframe src="../../js_camshift.html" width="100%"
+ onload="this.style.height=this.contentDocument.body.scrollHeight +'px';">
+</iframe>
+\endhtmlonly
+
+Additional Resources
+--------------------
+
+-# French Wikipedia page on [Camshift](http://fr.wikipedia.org/wiki/Camshift). (The two animations
+   are taken from there.)
+-# Bradski, G.R., "Real time face and object tracking as a component of a perceptual user
+   interface," Applications of Computer Vision, 1998. WACV '98. Proceedings., Fourth IEEE Workshop
+   on, vol., no., pp. 214-219, 19-21 Oct 1998
--- /dev/null
+Video Analysis {#tutorial_js_table_of_contents_video}
+==============
+
+- @subpage tutorial_js_meanshift
+
+ Here, we will learn about tracking algorithms such as "Meanshift", and its upgraded version, "Camshift"
+ to find and track objects in videos.
+
+- @subpage tutorial_js_lucas_kanade
+
+ Now let's discuss an important concept, "Optical Flow", which is related to videos and has many
+ applications.
+
+- @subpage tutorial_js_bg_subtraction
+
+ In several applications, we need to extract foreground for further operations like object tracking.
+ Background Subtraction is a well-known method in those cases.
- @ref intro
- @ref tutorial_root
- @ref tutorial_py_root
+@CMAKE_DOXYGEN_TUTORIAL_JS_ROOT@
@CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT@
- @ref faq
- @ref citelist
ocv_add_module(core
OPTIONAL opencv_cudev
- WRAP java python)
+ WRAP java python js)
set(extra_libs "")
set(the_description "Image Processing")
ocv_add_dispatched_file(accum SSE2 AVX NEON)
-ocv_define_module(imgproc opencv_core WRAP java python)
+ocv_define_module(imgproc opencv_core WRAP java python js)
--- /dev/null
+# ----------------------------------------------------------------------------
+# CMake file for js support
+# ----------------------------------------------------------------------------
+
+# message(STATUS "---------------- Start of JavaScript module ----------------------")
+
+set(the_description "The js bindings")
+set(MODULE_NAME js)
+
+set(OPENCV_JS "opencv.js")
+
+ocv_add_module(${MODULE_NAME} BINDINGS)
+
+# TODO: add emscripten path
+ocv_module_include_directories()
+
+# get list of modules to wrap
+# message(STATUS "Wrapped in ${MODULE_NAME}:")
+set(OPENCV_JS_MODULES)
+foreach(m ${OPENCV_MODULES_BUILD})
+ if (";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";${MODULE_NAME};" AND HAVE_${m})
+ list(APPEND OPENCV_JS_MODULES ${m})
+ # message(STATUS "\t${m}")
+ endif()
+endforeach()
+
+set(opencv_hdrs "")
+foreach(m ${OPENCV_JS_MODULES})
+ list(APPEND opencv_hdrs ${OPENCV_MODULE_${m}_HEADERS})
+endforeach(m)
+
+# header blacklist
+ocv_list_filterout(opencv_hdrs "modules/.*.h$")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/opengl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/ocl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/cuda.*")
+ocv_list_filterout(opencv_hdrs "modules/cudev")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
+ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker.hpp") # Conditional compilation
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/trace.*.hpp")
+
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${opencv_hdrs}")
+
+set(bindings_cpp "${CMAKE_CURRENT_BINARY_DIR}/bindings.cpp")
+
+set(scripts_hdr_parser "${CMAKE_CURRENT_SOURCE_DIR}/../python/src2/hdr_parser.py")
+
+set(JS_HELPER "${CMAKE_CURRENT_SOURCE_DIR}/src/helpers.js")
+
+add_custom_command(
+ OUTPUT ${bindings_cpp}
+ COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py" ${scripts_hdr_parser} ${bindings_cpp} "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp"
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/templates.py
+ DEPENDS ${scripts_hdr_parser}
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/headers.txt
+ DEPENDS ${opencv_hdrs}
+ DEPENDS ${JS_HELPER})
+
+add_definitions("-std=c++11")
+
+link_libraries(${OPENCV_MODULE_${the_module}_DEPS})
+
+ocv_add_executable(${the_module} ${bindings_cpp})
+
+set_target_properties(${the_module} PROPERTIES COMPILE_FLAGS "-Wno-missing-prototypes")
+
+set_target_properties(${the_module} PROPERTIES LINK_FLAGS "--memory-init-file 0 -s TOTAL_MEMORY=134217728 -s ALLOW_MEMORY_GROWTH=1 -s MODULARIZE=1 -s EXPORT_NAME=\"'cv'\" -s DEMANGLE_SUPPORT=1 -s FORCE_FILESYSTEM=1 --use-preload-plugins --bind --post-js ${JS_HELPER} -Wno-missing-prototypes")
+
+# add UMD wrapper
+set(MODULE_JS_PATH "${OpenCV_BINARY_DIR}/bin/${the_module}.js")
+set(OCV_JS_PATH "${OpenCV_BINARY_DIR}/bin/${OPENCV_JS}")
+
+add_custom_command(
+ OUTPUT ${OCV_JS_PATH}
+ COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/make_umd.py" ${MODULE_JS_PATH} "${OCV_JS_PATH}"
+ DEPENDS ${the_module}
+ DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/make_umd.py")
+
+add_custom_target(${OPENCV_JS} ALL
+ DEPENDS ${OCV_JS_PATH}
+ DEPENDS ${the_module})
+
+# test
+set(opencv_test_js_bin_dir "${EXECUTABLE_OUTPUT_PATH}")
+set(test_dir ${CMAKE_CURRENT_SOURCE_DIR}/test)
+
+set(opencv_test_js_file_deps "")
+
+# message(STATUS "${opencv_test_js_bin_dir}")
+
+# make sure the build directory exists
+file(MAKE_DIRECTORY "${opencv_test_js_bin_dir}")
+
+# gather and copy specific files for js test
+file(GLOB_RECURSE test_files RELATIVE "${test_dir}" "${test_dir}/*")
+foreach(f ${test_files})
+ # message(STATUS "copy ${test_dir}/${f} ${opencv_test_js_bin_dir}/${f}")
+ add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${f}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${test_dir}/${f}" "${opencv_test_js_bin_dir}/${f}"
+ DEPENDS "${test_dir}/${f}"
+ COMMENT "Copying ${f}"
+ )
+ list(APPEND opencv_test_js_file_deps "${test_dir}/${f}" "${opencv_test_js_bin_dir}/${f}")
+endforeach()
+
+# copy test data
+set(test_data "haarcascade_frontalface_default.xml")
+set(test_data_path "${PROJECT_SOURCE_DIR}/../../data/haarcascades/${test_data}")
+
+add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${test_data}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}"
+ DEPENDS "${test_data_path}"
+ COMMENT "Copying ${test_data}"
+ )
+list(APPEND opencv_test_js_file_deps "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}")
+
+add_custom_target(${PROJECT_NAME}_test ALL
+ DEPENDS ${OCV_JS_PATH} ${opencv_test_js_file_deps})
+
+unset(MODULE_NAME)
+
+# message(STATUS "---------------- End of JavaScript module ----------------------")
--- /dev/null
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "CallExpression": {"arguments": "first"},
+ "SwitchCase": 1}],
+ "no-var": "off",
+ "prefer-rest-params": "off",
+ "require-jsdoc": "off"
+ }
+}
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*M///////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//M*/
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/video/background_segm.hpp"
+#include "opencv2/objdetect.hpp"
+
+#include <emscripten/bind.h>
+
+using namespace emscripten;
+using namespace cv;
+
+namespace binding_utils
+{
+ template<typename T>
+ emscripten::val matData(const cv::Mat& mat)
+ {
+ return emscripten::val(emscripten::memory_view<T>((mat.total()*mat.elemSize())/sizeof(T),
+ (T*)mat.data));
+ }
+
+ template<typename T>
+ emscripten::val matPtr(const cv::Mat& mat, int i)
+ {
+ return emscripten::val(emscripten::memory_view<T>(mat.step1(0), mat.ptr<T>(i)));
+ }
+
+ template<typename T>
+ emscripten::val matPtr(const cv::Mat& mat, int i, int j)
+ {
+ return emscripten::val(emscripten::memory_view<T>(mat.step1(1), mat.ptr<T>(i,j)));
+ }
+
+ cv::Mat* createMat(int rows, int cols, int type, intptr_t data, size_t step)
+ {
+ return new cv::Mat(rows, cols, type, reinterpret_cast<void*>(data), step);
+ }
+
+ static emscripten::val getMatSize(const cv::Mat& mat)
+ {
+ emscripten::val size = emscripten::val::array();
+ for (int i = 0; i < mat.dims; i++) {
+ size.call<void>("push", mat.size[i]);
+ }
+ return size;
+ }
+
+ static emscripten::val getMatStep(const cv::Mat& mat)
+ {
+ emscripten::val step = emscripten::val::array();
+ for (int i = 0; i < mat.dims; i++) {
+ step.call<void>("push", mat.step[i]);
+ }
+ return step;
+ }
+
+ static Mat matEye(int rows, int cols, int type)
+ {
+ return Mat(cv::Mat::eye(rows, cols, type));
+ }
+
+ static Mat matEye(Size size, int type)
+ {
+ return Mat(cv::Mat::eye(size, type));
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype, double alpha, double beta)
+ {
+ obj.convertTo(m, rtype, alpha, beta);
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype)
+ {
+ obj.convertTo(m, rtype);
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype, double alpha)
+ {
+ obj.convertTo(m, rtype, alpha);
+ }
+
+ Size matSize(const cv::Mat& mat)
+ {
+ return mat.size();
+ }
+
+ cv::Mat matZeros(int arg0, int arg1, int arg2)
+ {
+ return cv::Mat::zeros(arg0, arg1, arg2);
+ }
+
+ cv::Mat matZeros(cv::Size arg0, int arg1)
+ {
+ return cv::Mat::zeros(arg0,arg1);
+ }
+
+ cv::Mat matOnes(int arg0, int arg1, int arg2)
+ {
+ return cv::Mat::ones(arg0, arg1, arg2);
+ }
+
+ cv::Mat matOnes(cv::Size arg0, int arg1)
+ {
+ return cv::Mat::ones(arg0, arg1);
+ }
+
+ double matDot(const cv::Mat& obj, const Mat& mat)
+ {
+ return obj.dot(mat);
+ }
+
+ Mat matMul(const cv::Mat& obj, const Mat& mat, double scale)
+ {
+ return Mat(obj.mul(mat, scale));
+ }
+
+ Mat matT(const cv::Mat& obj)
+ {
+ return Mat(obj.t());
+ }
+
+ Mat matInv(const cv::Mat& obj, int type)
+ {
+ return Mat(obj.inv(type));
+ }
+
+ void matCopyTo(const cv::Mat& obj, cv::Mat& mat)
+ {
+ return obj.copyTo(mat);
+ }
+
+ void matCopyTo(const cv::Mat& obj, cv::Mat& mat, const cv::Mat& mask)
+ {
+ return obj.copyTo(mat, mask);
+ }
+
+ Mat matDiag(const cv::Mat& obj, int d)
+ {
+ return obj.diag(d);
+ }
+
+ Mat matDiag(const cv::Mat& obj)
+ {
+ return obj.diag();
+ }
+
+ void matSetTo(cv::Mat& obj, const cv::Scalar& s)
+ {
+ obj.setTo(s);
+ }
+
+ void matSetTo(cv::Mat& obj, const cv::Scalar& s, const cv::Mat& mask)
+ {
+ obj.setTo(s, mask);
+ }
+
+ emscripten::val rotatedRectPoints(const cv::RotatedRect& obj)
+ {
+ cv::Point2f points[4];
+ obj.points(points);
+ emscripten::val pointsArray = emscripten::val::array();
+ for (int i = 0; i < 4; i++) {
+ pointsArray.call<void>("push", points[i]);
+ }
+ return pointsArray;
+ }
+
+ Rect rotatedRectBoundingRect(const cv::RotatedRect& obj)
+ {
+ return obj.boundingRect();
+ }
+
+ Rect2f rotatedRectBoundingRect2f(const cv::RotatedRect& obj)
+ {
+ return obj.boundingRect2f();
+ }
+
+ int cvMatDepth(int flags)
+ {
+ return CV_MAT_DEPTH(flags);
+ }
+
+ class MinMaxLoc
+ {
+ public:
+ double minVal;
+ double maxVal;
+ Point minLoc;
+ Point maxLoc;
+ };
+
+ MinMaxLoc minMaxLoc(const cv::Mat& src, const cv::Mat& mask)
+ {
+ MinMaxLoc result;
+ cv::minMaxLoc(src, &result.minVal, &result.maxVal, &result.minLoc, &result.maxLoc, mask);
+ return result;
+ }
+
+ MinMaxLoc minMaxLoc_1(const cv::Mat& src)
+ {
+ MinMaxLoc result;
+ cv::minMaxLoc(src, &result.minVal, &result.maxVal, &result.minLoc, &result.maxLoc);
+ return result;
+ }
+
+ class Circle
+ {
+ public:
+ Point2f center;
+ float radius;
+ };
+
+ Circle minEnclosingCircle(const cv::Mat& points)
+ {
+ Circle circle;
+ cv::minEnclosingCircle(points, circle.center, circle.radius);
+ return circle;
+ }
+
+ emscripten::val CamShiftWrapper(const cv::Mat& arg1, Rect& arg2, TermCriteria arg3)
+ {
+ RotatedRect rotatedRect = cv::CamShift(arg1, arg2, arg3);
+ emscripten::val result = emscripten::val::array();
+ result.call<void>("push", rotatedRect);
+ result.call<void>("push", arg2);
+ return result;
+ }
+
+ emscripten::val meanShiftWrapper(const cv::Mat& arg1, Rect& arg2, TermCriteria arg3)
+ {
+ int n = cv::meanShift(arg1, arg2, arg3);
+ emscripten::val result = emscripten::val::array();
+ result.call<void>("push", n);
+ result.call<void>("push", arg2);
+ return result;
+ }
+
+ std::string getExceptionMsg(const cv::Exception& e) {
+ return e.msg;
+ }
+
+ void setExceptionMsg(cv::Exception& e, std::string msg) {
+ e.msg = msg;
+ return;
+ }
+
+ cv::Exception exceptionFromPtr(intptr_t ptr) {
+ return *reinterpret_cast<cv::Exception*>(ptr);
+ }
+
+ std::string getBuildInformation() {
+ return cv::getBuildInformation();
+ }
+}
+
+EMSCRIPTEN_BINDINGS(binding_utils)
+{
+ register_vector<int>("IntVector");
+ register_vector<float>("FloatVector");
+ register_vector<double>("DoubleVector");
+ register_vector<cv::Point>("PointVector");
+ register_vector<cv::Mat>("MatVector");
+ register_vector<cv::Rect>("RectVector");
+
+ emscripten::class_<cv::Mat>("Mat")
+ .constructor<>()
+ .constructor<const Mat&>()
+ .constructor<Size, int>()
+ .constructor<int, int, int>()
+ .constructor<int, int, int, const Scalar&>()
+ .constructor(&binding_utils::createMat, allow_raw_pointers())
+
+ .class_function("eye", select_overload<Mat(int, int, int)>(&binding_utils::matEye))
+ .class_function("eye", select_overload<Mat(Size, int)>(&binding_utils::matEye))
+ .class_function("ones", select_overload<Mat(int, int, int)>(&binding_utils::matOnes))
+ .class_function("ones", select_overload<Mat(Size, int)>(&binding_utils::matOnes))
+ .class_function("zeros", select_overload<Mat(int, int, int)>(&binding_utils::matZeros))
+ .class_function("zeros", select_overload<Mat(Size, int)>(&binding_utils::matZeros))
+
+ .property("rows", &cv::Mat::rows)
+ .property("cols", &cv::Mat::cols)
+ .property("matSize", &binding_utils::getMatSize)
+ .property("step", &binding_utils::getMatStep)
+ .property("data", &binding_utils::matData<unsigned char>)
+ .property("data8S", &binding_utils::matData<char>)
+ .property("data16U", &binding_utils::matData<unsigned short>)
+ .property("data16S", &binding_utils::matData<short>)
+ .property("data32S", &binding_utils::matData<int>)
+ .property("data32F", &binding_utils::matData<float>)
+ .property("data64F", &binding_utils::matData<double>)
+
+ .function("elemSize", select_overload<size_t()const>(&cv::Mat::elemSize))
+ .function("elemSize1", select_overload<size_t()const>(&cv::Mat::elemSize1))
+ .function("channels", select_overload<int()const>(&cv::Mat::channels))
+ .function("convertTo", select_overload<void(const Mat&, Mat&, int, double, double)>(&binding_utils::convertTo))
+ .function("convertTo", select_overload<void(const Mat&, Mat&, int)>(&binding_utils::convertTo))
+ .function("convertTo", select_overload<void(const Mat&, Mat&, int, double)>(&binding_utils::convertTo))
+ .function("total", select_overload<size_t()const>(&cv::Mat::total))
+ .function("row", select_overload<Mat(int)const>(&cv::Mat::row))
+ .function("create", select_overload<void(int, int, int)>(&cv::Mat::create))
+ .function("create", select_overload<void(Size, int)>(&cv::Mat::create))
+ .function("rowRange", select_overload<Mat(int, int)const>(&cv::Mat::rowRange))
+ .function("rowRange", select_overload<Mat(const Range&)const>(&cv::Mat::rowRange))
+ .function("copyTo", select_overload<void(const Mat&, Mat&)>(&binding_utils::matCopyTo))
+ .function("copyTo", select_overload<void(const Mat&, Mat&, const Mat&)>(&binding_utils::matCopyTo))
+ .function("type", select_overload<int()const>(&cv::Mat::type))
+ .function("empty", select_overload<bool()const>(&cv::Mat::empty))
+ .function("colRange", select_overload<Mat(int, int)const>(&cv::Mat::colRange))
+ .function("colRange", select_overload<Mat(const Range&)const>(&cv::Mat::colRange))
+ .function("step1", select_overload<size_t(int)const>(&cv::Mat::step1))
+ .function("clone", select_overload<Mat()const>(&cv::Mat::clone))
+ .function("depth", select_overload<int()const>(&cv::Mat::depth))
+ .function("col", select_overload<Mat(int)const>(&cv::Mat::col))
+ .function("dot", select_overload<double(const Mat&, const Mat&)>(&binding_utils::matDot))
+ .function("mul", select_overload<Mat(const Mat&, const Mat&, double)>(&binding_utils::matMul))
+ .function("inv", select_overload<Mat(const Mat&, int)>(&binding_utils::matInv))
+ .function("t", select_overload<Mat(const Mat&)>(&binding_utils::matT))
+ .function("roi", select_overload<Mat(const Rect&)const>(&cv::Mat::operator()))
+ .function("diag", select_overload<Mat(const Mat&, int)>(&binding_utils::matDiag))
+ .function("diag", select_overload<Mat(const Mat&)>(&binding_utils::matDiag))
+ .function("isContinuous", select_overload<bool()const>(&cv::Mat::isContinuous))
+ .function("setTo", select_overload<void(Mat&, const Scalar&)>(&binding_utils::matSetTo))
+ .function("setTo", select_overload<void(Mat&, const Scalar&, const Mat&)>(&binding_utils::matSetTo))
+ .function("size", select_overload<Size(const Mat&)>(&binding_utils::matSize))
+
+ .function("ptr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<unsigned char>))
+ .function("ptr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<unsigned char>))
+ .function("ucharPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<unsigned char>))
+ .function("ucharPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<unsigned char>))
+ .function("charPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<char>))
+ .function("charPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<char>))
+ .function("shortPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<short>))
+ .function("shortPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<short>))
+ .function("ushortPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<unsigned short>))
+ .function("ushortPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<unsigned short>))
+ .function("intPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<int>))
+ .function("intPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<int>))
+ .function("floatPtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<float>))
+ .function("floatPtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<float>))
+ .function("doublePtr", select_overload<val(const Mat&, int)>(&binding_utils::matPtr<double>))
+ .function("doublePtr", select_overload<val(const Mat&, int, int)>(&binding_utils::matPtr<double>))
+
+ .function("charAt", select_overload<char&(int)>(&cv::Mat::at<char>))
+ .function("charAt", select_overload<char&(int, int)>(&cv::Mat::at<char>))
+ .function("charAt", select_overload<char&(int, int, int)>(&cv::Mat::at<char>))
+ .function("ucharAt", select_overload<unsigned char&(int)>(&cv::Mat::at<unsigned char>))
+ .function("ucharAt", select_overload<unsigned char&(int, int)>(&cv::Mat::at<unsigned char>))
+ .function("ucharAt", select_overload<unsigned char&(int, int, int)>(&cv::Mat::at<unsigned char>))
+ .function("shortAt", select_overload<short&(int)>(&cv::Mat::at<short>))
+ .function("shortAt", select_overload<short&(int, int)>(&cv::Mat::at<short>))
+ .function("shortAt", select_overload<short&(int, int, int)>(&cv::Mat::at<short>))
+ .function("ushortAt", select_overload<unsigned short&(int)>(&cv::Mat::at<unsigned short>))
+ .function("ushortAt", select_overload<unsigned short&(int, int)>(&cv::Mat::at<unsigned short>))
+ .function("ushortAt", select_overload<unsigned short&(int, int, int)>(&cv::Mat::at<unsigned short>))
+ .function("intAt", select_overload<int&(int)>(&cv::Mat::at<int>) )
+ .function("intAt", select_overload<int&(int, int)>(&cv::Mat::at<int>) )
+ .function("intAt", select_overload<int&(int, int, int)>(&cv::Mat::at<int>) )
+ .function("floatAt", select_overload<float&(int)>(&cv::Mat::at<float>))
+ .function("floatAt", select_overload<float&(int, int)>(&cv::Mat::at<float>))
+ .function("floatAt", select_overload<float&(int, int, int)>(&cv::Mat::at<float>))
+ .function("doubleAt", select_overload<double&(int, int, int)>(&cv::Mat::at<double>))
+ .function("doubleAt", select_overload<double&(int)>(&cv::Mat::at<double>))
+ .function("doubleAt", select_overload<double&(int, int)>(&cv::Mat::at<double>));
+
+ emscripten::value_object<cv::Range>("Range")
+ .field("start", &cv::Range::start)
+ .field("end", &cv::Range::end);
+
+ emscripten::value_object<cv::TermCriteria>("TermCriteria")
+ .field("type", &cv::TermCriteria::type)
+ .field("maxCount", &cv::TermCriteria::maxCount)
+ .field("epsilon", &cv::TermCriteria::epsilon);
+
+#define EMSCRIPTEN_CV_SIZE(type) \
+ emscripten::value_object<type>("#type") \
+ .field("width", &type::width) \
+ .field("height", &type::height);
+
+ EMSCRIPTEN_CV_SIZE(Size)
+ EMSCRIPTEN_CV_SIZE(Size2f)
+
+#define EMSCRIPTEN_CV_POINT(type) \
+ emscripten::value_object<type>("#type") \
+ .field("x", &type::x) \
+ .field("y", &type::y); \
+
+ EMSCRIPTEN_CV_POINT(Point)
+ EMSCRIPTEN_CV_POINT(Point2f)
+
+#define EMSCRIPTEN_CV_RECT(type, name) \
+    emscripten::value_object<cv::Rect_<type>>(name) \
+ .field("x", &cv::Rect_<type>::x) \
+ .field("y", &cv::Rect_<type>::y) \
+ .field("width", &cv::Rect_<type>::width) \
+ .field("height", &cv::Rect_<type>::height);
+
+ EMSCRIPTEN_CV_RECT(int, "Rect")
+ EMSCRIPTEN_CV_RECT(float, "Rect2f")
+
+ emscripten::value_object<cv::RotatedRect>("RotatedRect")
+ .field("center", &cv::RotatedRect::center)
+ .field("size", &cv::RotatedRect::size)
+ .field("angle", &cv::RotatedRect::angle);
+
+ function("rotatedRectPoints", select_overload<emscripten::val(const cv::RotatedRect&)>(&binding_utils::rotatedRectPoints));
+ function("rotatedRectBoundingRect", select_overload<Rect(const cv::RotatedRect&)>(&binding_utils::rotatedRectBoundingRect));
+ function("rotatedRectBoundingRect2f", select_overload<Rect2f(const cv::RotatedRect&)>(&binding_utils::rotatedRectBoundingRect2f));
+
+    emscripten::value_array<cv::Scalar_<double>>("Scalar")
+ .element(index<0>())
+ .element(index<1>())
+ .element(index<2>())
+ .element(index<3>());
+
+ emscripten::value_object<binding_utils::MinMaxLoc>("MinMaxLoc")
+ .field("minVal", &binding_utils::MinMaxLoc::minVal)
+ .field("maxVal", &binding_utils::MinMaxLoc::maxVal)
+ .field("minLoc", &binding_utils::MinMaxLoc::minLoc)
+ .field("maxLoc", &binding_utils::MinMaxLoc::maxLoc);
+
+ emscripten::value_object<binding_utils::Circle>("Circle")
+ .field("center", &binding_utils::Circle::center)
+ .field("radius", &binding_utils::Circle::radius);
+
+    emscripten::value_object<cv::Moments>("Moments")
+ .field("m00", &cv::Moments::m00)
+ .field("m10", &cv::Moments::m10)
+ .field("m01", &cv::Moments::m01)
+ .field("m20", &cv::Moments::m20)
+ .field("m11", &cv::Moments::m11)
+ .field("m02", &cv::Moments::m02)
+ .field("m30", &cv::Moments::m30)
+ .field("m21", &cv::Moments::m21)
+ .field("m12", &cv::Moments::m12)
+ .field("m03", &cv::Moments::m03)
+ .field("mu20", &cv::Moments::mu20)
+ .field("mu11", &cv::Moments::mu11)
+ .field("mu02", &cv::Moments::mu02)
+ .field("mu30", &cv::Moments::mu30)
+ .field("mu21", &cv::Moments::mu21)
+ .field("mu12", &cv::Moments::mu12)
+ .field("mu03", &cv::Moments::mu03)
+ .field("nu20", &cv::Moments::nu20)
+ .field("nu11", &cv::Moments::nu11)
+ .field("nu02", &cv::Moments::nu02)
+ .field("nu30", &cv::Moments::nu30)
+ .field("nu21", &cv::Moments::nu21)
+ .field("nu12", &cv::Moments::nu12)
+ .field("nu03", &cv::Moments::nu03);
+
+ emscripten::value_object<cv::Exception>("Exception")
+ .field("code", &cv::Exception::code)
+ .field("msg", &binding_utils::getExceptionMsg, &binding_utils::setExceptionMsg);
+
+ function("exceptionFromPtr", &binding_utils::exceptionFromPtr, allow_raw_pointers());
+
+ function("minEnclosingCircle", select_overload<binding_utils::Circle(const cv::Mat&)>(&binding_utils::minEnclosingCircle));
+
+ function("minMaxLoc", select_overload<binding_utils::MinMaxLoc(const cv::Mat&, const cv::Mat&)>(&binding_utils::minMaxLoc));
+
+ function("minMaxLoc", select_overload<binding_utils::MinMaxLoc(const cv::Mat&)>(&binding_utils::minMaxLoc_1));
+
+ function("morphologyDefaultBorderValue", &cv::morphologyDefaultBorderValue);
+
+ function("CV_MAT_DEPTH", &binding_utils::cvMatDepth);
+
+ function("CamShift", select_overload<emscripten::val(const cv::Mat&, Rect&, TermCriteria)>(&binding_utils::CamShiftWrapper));
+
+ function("meanShift", select_overload<emscripten::val(const cv::Mat&, Rect&, TermCriteria)>(&binding_utils::meanShiftWrapper));
+
+ function("getBuildInformation", &binding_utils::getBuildInformation);
+
+ constant("CV_8UC1", CV_8UC1);
+ constant("CV_8UC2", CV_8UC2);
+ constant("CV_8UC3", CV_8UC3);
+ constant("CV_8UC4", CV_8UC4);
+
+ constant("CV_8SC1", CV_8SC1);
+ constant("CV_8SC2", CV_8SC2);
+ constant("CV_8SC3", CV_8SC3);
+ constant("CV_8SC4", CV_8SC4);
+
+ constant("CV_16UC1", CV_16UC1);
+ constant("CV_16UC2", CV_16UC2);
+ constant("CV_16UC3", CV_16UC3);
+ constant("CV_16UC4", CV_16UC4);
+
+ constant("CV_16SC1", CV_16SC1);
+ constant("CV_16SC2", CV_16SC2);
+ constant("CV_16SC3", CV_16SC3);
+ constant("CV_16SC4", CV_16SC4);
+
+ constant("CV_32SC1", CV_32SC1);
+ constant("CV_32SC2", CV_32SC2);
+ constant("CV_32SC3", CV_32SC3);
+ constant("CV_32SC4", CV_32SC4);
+
+ constant("CV_32FC1", CV_32FC1);
+ constant("CV_32FC2", CV_32FC2);
+ constant("CV_32FC3", CV_32FC3);
+ constant("CV_32FC4", CV_32FC4);
+
+ constant("CV_64FC1", CV_64FC1);
+ constant("CV_64FC2", CV_64FC2);
+ constant("CV_64FC3", CV_64FC3);
+ constant("CV_64FC4", CV_64FC4);
+
+ constant("CV_8U", CV_8U);
+ constant("CV_8S", CV_8S);
+ constant("CV_16U", CV_16U);
+ constant("CV_16S", CV_16S);
+ constant("CV_32S", CV_32S);
+ constant("CV_32F", CV_32F);
+ constant("CV_64F", CV_64F);
+
+ constant("INT_MIN", INT_MIN);
+ constant("INT_MAX", INT_MAX);
+}
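
For orientation, a minimal sketch of how the bindings registered above surface in JavaScript once opencv.js is built and loaded as `cv` (this assumes the Mat constructors registered earlier in this file; all names are illustrative):

    let mat = new cv.Mat(3, 3, cv.CV_8UC1);  // type constant bound via constant() above
    mat.setTo([7, 0, 0, 0]);                 // Scalar is a value_array, so a plain JS array marshals
    let v = mat.ucharAt(1, 2);               // cv::Mat::at<unsigned char>
    let row1 = mat.ucharPtr(1);              // typed view from binding_utils::matPtr
    let trans = mat.t();                     // binding_utils::matT
    console.log(mat.size().width, v, row1[0], trans.isContinuous());
    mat.delete(); trans.delete();            // Embind-wrapped objects must be freed explicitly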
--- /dev/null
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+###############################################################################
+
+from __future__ import print_function
+import sys, re, os
+from templates import *
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+
+func_table = {}
+
+# Ignore these functions due to Embind limitations for now
+ignore_list = ['locate', #int&
+ 'minEnclosingCircle', #float&
+ 'checkRange',
+ 'minMaxLoc', #double*
+ 'floodFill',
+ 'phaseCorrelate',
+ 'randShuffle',
+ 'calibrationMatrixValues', #double&
+ 'undistortPoints', # global redefinition
+ 'CamShift', #Rect&
+ 'meanShift' #Rect&
+ ]
+
+# Classes and methods whitelist
+core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',\
+ 'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
+ 'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
+ 'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
+ 'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'setIdentity', 'setRNGSeed', \
+ 'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
+ 'Algorithm': []}
+
+imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr','Sobel', \
+ 'adaptiveThreshold','approxPolyDP','arcLength','bilateralFilter','blur','boundingRect','boxFilter',\
+ 'calcBackProject','calcHist','circle','compareHist','connectedComponents','connectedComponentsWithStats', \
+ 'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris','cornerMinEigenVal','createCLAHE', \
+ 'createLineSegmentDetector','cvtColor','demosaicing','dilate', 'distanceTransform','distanceTransformWithLabels', \
+ 'drawContours','ellipse','ellipse2Poly','equalizeHist','erode', 'filter2D', 'findContours','fitEllipse', \
+ 'fitLine', 'floodFill','getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', \
+ 'goodFeaturesToTrack','grabCut','initUndistortRectifyMap', 'integral','integral2', 'isContourConvex', 'line', \
+ 'matchShapes', 'matchTemplate','medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
+ 'pointPolygonTest', 'putText','pyrDown','pyrUp','rectangle','remap', 'resize','sepFilter2D','threshold', \
+ 'undistort','warpAffine','warpPerspective','watershed'],
+ 'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}
+
+objdetect = {'': ['groupRectangles'],
+ 'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
+ 'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}
+
+video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', 'estimateRigidTransform',\
+ 'findTransformECC', 'meanShift'],
+ 'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
+ 'BackgroundSubtractor': ['apply', 'getBackgroundImage']}
+
+def makeWhiteList(module_list):
+ wl = {}
+ for m in module_list:
+ for k in m.keys():
+ if k in wl:
+ wl[k] += m[k]
+ else:
+ wl[k] = m[k]
+ return wl
+
+white_list = makeWhiteList([core, imgproc, objdetect, video])
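+# For illustration: makeWhiteList merges the per-module tables above, so e.g.
+#   makeWhiteList([{'': ['add']}, {'': ['blur'], 'CLAHE': ['apply']}])
+#   == {'': ['add', 'blur'], 'CLAHE': ['apply']}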
+
+# Features to be exported
+export_enums = False
+export_consts = True
+with_wrapped_functions = True
+with_default_params = True
+with_vec_from_js_array = True
+
+wrapper_namespace = "Wrappers"
+type_dict = {
+ 'InputArray': 'const cv::Mat&',
+ 'OutputArray': 'cv::Mat&',
+ 'InputOutputArray': 'cv::Mat&',
+ 'InputArrayOfArrays': 'const std::vector<cv::Mat>&',
+ 'OutputArrayOfArrays': 'std::vector<cv::Mat>&',
+ 'String': 'std::string',
+ 'const String&':'const std::string&'
+}
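+
+# e.g. a parsed declaration 'void add(InputArray src1, InputArray src2, OutputArray dst)'
+# is bound as 'void(const cv::Mat&, const cv::Mat&, cv::Mat&)' on the Embind side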
+
+def normalize_class_name(name):
+ return re.sub(r"^cv\.", "", name).replace(".", "_")
+
+
+class ClassProp(object):
+ def __init__(self, decl):
+ self.tp = decl[0].replace("*", "_ptr").strip()
+ self.name = decl[1]
+ self.readonly = True
+ if "/RW" in decl[3]:
+ self.readonly = False
+
+
+class ClassInfo(object):
+ def __init__(self, name, decl=None):
+ self.cname = name.replace(".", "::")
+ self.name = self.wname = normalize_class_name(name)
+
+ self.ismap = False
+ self.issimple = False
+ self.isalgorithm = False
+ self.methods = {}
+ self.ext_constructors = {}
+ self.props = []
+ self.consts = {}
+ customname = False
+ self.jsfuncs = {}
+        self.constructor_arg_num = set()
+
+ self.has_smart_ptr = False
+
+ if decl:
+ self.bases = decl[1].split()[1:]
+ if len(self.bases) > 1:
+ self.bases = [self.bases[0].strip(",")]
+ # return sys.exit(-1)
+ if self.bases and self.bases[0].startswith("cv::"):
+ self.bases[0] = self.bases[0][4:]
+ if self.bases and self.bases[0] == "Algorithm":
+ self.isalgorithm = True
+ for m in decl[2]:
+ if m.startswith("="):
+ self.wname = m[1:]
+ customname = True
+ elif m == "/Map":
+ self.ismap = True
+ elif m == "/Simple":
+ self.issimple = True
+ self.props = [ClassProp(p) for p in decl[3]]
+
+ if not customname and self.wname.startswith("Cv"):
+ self.wname = self.wname[2:]
+
+
+def handle_ptr(tp):
+ if tp.startswith('Ptr_'):
+ tp = 'Ptr<' + "::".join(tp.split('_')[1:]) + '>'
+ return tp
+
+def handle_vector(tp):
+ if tp.startswith('vector_'):
+ tp = 'std::vector<' + "::".join(tp.split('_')[1:]) + '>'
+ return tp
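+
+# e.g. handle_ptr('Ptr_BackgroundSubtractorMOG2') -> 'Ptr<BackgroundSubtractorMOG2>'
+#      handle_vector('vector_Mat')                -> 'std::vector<Mat>'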
+
+
+class ArgInfo(object):
+ def __init__(self, arg_tuple):
+ self.tp = handle_ptr(arg_tuple[0]).strip()
+ self.name = arg_tuple[1]
+ self.defval = arg_tuple[2]
+ self.isarray = False
+ self.arraylen = 0
+ self.arraycvt = None
+ self.inputarg = True
+ self.outputarg = False
+ self.returnarg = False
+ self.const = False
+ self.reference = False
+ for m in arg_tuple[3]:
+ if m == "/O":
+ self.inputarg = False
+ self.outputarg = True
+ self.returnarg = True
+ elif m == "/IO":
+ self.inputarg = True
+ self.outputarg = True
+ self.returnarg = True
+ elif m.startswith("/A"):
+ self.isarray = True
+ self.arraylen = m[2:].strip()
+ elif m.startswith("/CA"):
+ self.isarray = True
+ self.arraycvt = m[2:].strip()
+ elif m == "/C":
+ self.const = True
+ elif m == "/Ref":
+ self.reference = True
+ if self.tp == "Mat":
+ if self.outputarg:
+ self.tp = "cv::Mat&"
+ elif self.inputarg:
+ self.tp = "const cv::Mat&"
+ if self.tp == "vector_Mat":
+ if self.outputarg:
+ self.tp = "std::vector<cv::Mat>&"
+ elif self.inputarg:
+ self.tp = "const std::vector<cv::Mat>&"
+ self.tp = handle_vector(self.tp).strip()
+ if self.const:
+ self.tp = "const " + self.tp
+ if self.reference:
+ self.tp = self.tp + "&"
+ self.py_inputarg = False
+ self.py_outputarg = False
+
+class FuncVariant(object):
+ def __init__(self, class_name, name, decl, is_constructor, is_class_method, is_const, is_virtual, is_pure_virtual, ref_return, const_return):
+ self.class_name = class_name
+ self.name = self.wname = name
+ self.is_constructor = is_constructor
+ self.is_class_method = is_class_method
+ self.is_const = is_const
+ self.is_virtual = is_virtual
+ self.is_pure_virtual = is_pure_virtual
+ self.refret = ref_return
+ self.constret = const_return
+ self.rettype = handle_vector(handle_ptr(decl[1]).strip()).strip()
+ if self.rettype == "void":
+ self.rettype = ""
+ self.args = []
+ self.array_counters = {}
+
+ for a in decl[3]:
+ ainfo = ArgInfo(a)
+ if ainfo.isarray and not ainfo.arraycvt:
+ c = ainfo.arraylen
+ c_arrlist = self.array_counters.get(c, [])
+ if c_arrlist:
+ c_arrlist.append(ainfo.name)
+ else:
+ self.array_counters[c] = [ainfo.name]
+ self.args.append(ainfo)
+
+
+class FuncInfo(object):
+ def __init__(self, class_name, name, cname, namespace, isconstructor):
+ self.class_name = class_name
+ self.name = name
+ self.cname = cname
+ self.namespace = namespace
+ self.variants = []
+ self.is_constructor = isconstructor
+
+ def add_variant(self, variant):
+ self.variants.append(variant)
+
+
+class Namespace(object):
+ def __init__(self):
+ self.funcs = {}
+ self.enums = {}
+ self.consts = {}
+
+
+class JSWrapperGenerator(object):
+ def __init__(self):
+
+ self.bindings = []
+ self.wrapper_funcs = []
+
+ self.classes = {}
+ self.namespaces = {}
+ self.enums = {}
+
+ self.parser = hdr_parser.CppHeaderParser()
+ self.class_idx = 0
+
+ def add_class(self, stype, name, decl):
+ class_info = ClassInfo(name, decl)
+ class_info.decl_idx = self.class_idx
+ self.class_idx += 1
+
+ if class_info.name in self.classes:
+ print("Generator error: class %s (cpp_name=%s) already exists" \
+ % (class_info.name, class_info.cname))
+ sys.exit(-1)
+ self.classes[class_info.name] = class_info
+
+ if class_info.bases:
+ chunks = class_info.bases[0].split('::')
+ base = '_'.join(chunks)
+ while base not in self.classes and len(chunks) > 1:
+ del chunks[-2]
+ base = '_'.join(chunks)
+ if base not in self.classes:
+ print("Generator error: unable to resolve base %s for %s"
+ % (class_info.bases[0], class_info.name))
+ sys.exit(-1)
+ else:
+ class_info.bases[0] = "::".join(chunks)
+ class_info.isalgorithm |= self.classes[base].isalgorithm
+
+ def split_decl_name(self, name):
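+        # e.g. 'cv.CascadeClassifier.detectMultiScale' ->
+        #      (['cv'], ['CascadeClassifier'], 'detectMultiScale'),
+        # assuming 'cv' is the innermost namespace known to the parser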
+ chunks = name.split('.')
+ namespace = chunks[:-1]
+ classes = []
+ while namespace and '.'.join(namespace) not in self.parser.namespaces:
+ classes.insert(0, namespace.pop())
+ return namespace, classes, chunks[-1]
+
+ def add_enum(self, decl):
+ name = decl[1]
+ namespace, classes, val = self.split_decl_name(name)
+ namespace = '.'.join(namespace)
+ val = '_'.join(classes + [name])
+ cname = name.replace('.', '::')
+ ns = self.namespaces.setdefault(namespace, Namespace())
+ if name in ns.enums:
+ print("Generator warning: constant %s (cname=%s) already exists" \
+ % (name, cname))
+ # sys.exit(-1)
+ else:
+ ns.enums[name] = []
+ for item in decl[3]:
+ ns.enums[name].append(item)
+
+ def add_const(self, name, decl):
+ cname = name.replace('.','::')
+ namespace, classes, name = self.split_decl_name(name)
+ namespace = '.'.join(namespace)
+ name = '_'.join(classes+[name])
+ ns = self.namespaces.setdefault(namespace, Namespace())
+ if name in ns.consts:
+ print("Generator error: constant %s (cname=%s) already exists" \
+ % (name, cname))
+ sys.exit(-1)
+ ns.consts[name] = cname
+
+ def add_func(self, decl):
+ namespace, classes, barename = self.split_decl_name(decl[0])
+ cpp_name = "::".join(namespace + classes + [barename])
+ name = barename
+ class_name = ''
+ bare_class_name = ''
+ if classes:
+ class_name = normalize_class_name('.'.join(namespace + classes))
+ bare_class_name = classes[-1]
+ namespace = '.'.join(namespace)
+
+ is_constructor = name == bare_class_name
+ is_class_method = False
+ is_const_method = False
+ is_virtual_method = False
+ is_pure_virtual_method = False
+ const_return = False
+ ref_return = False
+
+ for m in decl[2]:
+ if m == "/S":
+ is_class_method = True
+ elif m == "/C":
+ is_const_method = True
+ elif m == "/V":
+ is_virtual_method = True
+ elif m == "/PV":
+ is_pure_virtual_method = True
+ elif m == "/Ref":
+ ref_return = True
+ elif m == "/CRet":
+ const_return = True
+ elif m.startswith("="):
+ name = m[1:]
+
+ if class_name:
+ cpp_name = barename
+ func_map = self.classes[class_name].methods
+ else:
+ func_map = self.namespaces.setdefault(namespace, Namespace()).funcs
+
+ func = func_map.setdefault(name, FuncInfo(class_name, name, cpp_name, namespace, is_constructor))
+
+ variant = FuncVariant(class_name, name, decl, is_constructor, is_class_method, is_const_method,
+ is_virtual_method, is_pure_virtual_method, ref_return, const_return)
+ func.add_variant(variant)
+
+ def save(self, path, name, buf):
+        with open(path + "/" + name, "wt") as f:
+            f.write(buf.getvalue())
+
+ def gen_function_binding_with_wrapper(self, func, class_info):
+
+ binding_text = None
+ wrapper_func_text = None
+
+ bindings = []
+ wrappers = []
+
+ for index, variant in enumerate(func.variants):
+
+ factory = False
+ if class_info and 'Ptr<' in variant.rettype:
+
+ factory = True
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ if base_class_name in self.classes:
+ self.classes[base_class_name].has_smart_ptr = True
+ else:
+                    print("Generator warning: %s not found in classes for registering smart pointer, using %s instead"
+                          % (base_class_name, class_info.name))
+ self.classes[class_info.name].has_smart_ptr = True
+
+ def_args = []
+ has_def_param = False
+
+ # Return type
+ ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype
+ if ret_type.startswith('Ptr'): #smart pointer
+ ptr_type = ret_type.replace('Ptr<', '').replace('>', '')
+ if ptr_type in type_dict:
+ ret_type = type_dict[ptr_type]
+ for key in type_dict:
+ if key in ret_type:
+ ret_type = ret_type.replace(key, type_dict[key])
+
+ arg_types = []
+ unwrapped_arg_types = []
+ for arg in variant.args:
+ arg_type = None
+ if arg.tp in type_dict:
+ arg_type = type_dict[arg.tp]
+ else:
+ arg_type = arg.tp
+ # Add default value
+ if with_default_params and arg.defval != '':
+                def_args.append(arg.defval)
+ arg_types.append(arg_type)
+ unwrapped_arg_types.append(arg_type)
+
+            # Function attribute
+ func_attribs = ''
+ if '*' in ''.join(arg_types):
+ func_attribs += ', allow_raw_pointers()'
+
+ if variant.is_pure_virtual:
+ func_attribs += ', pure_virtual()'
+
+
+ # Wrapper function
+        wrap_func_name = (func.class_name + "_" if class_info is not None else "") + func.name.split("::")[-1] + "_wrapper"
+ js_func_name = func.name
+
+        # TODO: Name functions based on wrap directives or on the argument list
+ if index > 0:
+ wrap_func_name += str(index)
+ js_func_name += str(index)
+
+ c_func_name = 'Wrappers::' + wrap_func_name
+
+        # Binding template
+ raw_arg_names = ['arg' + str(i + 1) for i in range(0, len(variant.args))]
+ arg_names = []
+ w_signature = []
+ casted_arg_types = []
+ for arg_type, arg_name in zip(arg_types, raw_arg_names):
+ casted_arg_name = arg_name
+ if with_vec_from_js_array:
+ # Only support const vector reference as input parameter
+ match = re.search(r'const std::vector<(.*)>&', arg_type)
+ if match:
+ type_in_vect = match.group(1)
+ if type_in_vect != 'cv::Mat':
+ casted_arg_name = 'emscripten::vecFromJSArray<' + type_in_vect + '>(' + arg_name + ')'
+ arg_type = re.sub(r'std::vector<(.*)>', 'emscripten::val', arg_type)
+ w_signature.append(arg_type + ' ' + arg_name)
+ arg_names.append(casted_arg_name)
+ casted_arg_types.append(arg_type)
+
+ arg_types = casted_arg_types
+
+ # Argument list, signature
+ arg_names_casted = [c if a == b else c + '.as<' + a + '>()' for a, b, c in
+ zip(unwrapped_arg_types, arg_types, arg_names)]
+
+ # Add self object to the parameters
+ if class_info and not factory:
+ arg_types = [class_info.cname + '&'] + arg_types
+ w_signature = [class_info.cname + '& arg0 '] + w_signature
+
+ for j in range(0, len(def_args) + 1):
+ postfix = ''
+ if j > 0:
+                postfix = '_' + str(j)
+
+ ###################################
+ # Wrapper
+ if factory: # TODO or static
+ name = class_info.cname+'::' if variant.class_name else ""
+ cpp_call_text = static_class_call_template.substitute(scope=name,
+ func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+ elif class_info:
+ cpp_call_text = class_call_template.substitute(obj='arg0',
+ func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+ else:
+ cpp_call_text = call_template.substitute(func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+
+
+ wrapper_func_text = wrapper_function_template.substitute(ret_val=ret_type,
+ func=wrap_func_name+postfix,
+ signature=', '.join(w_signature[:len(w_signature)-j]),
+ cpp_call=cpp_call_text,
+ const='' if variant.is_const else '')
+
+ ###################################
+ # Binding
+ if class_info:
+ if factory:
+ # print("Factory Function: ", c_func_name, len(variant.args) - j, class_info.name)
+ if variant.is_pure_virtual:
+ # FIXME: workaround for pure virtual in constructor
+ # e.g. DescriptorMatcher_clone_wrapper
+ continue
+ # consider the default parameter variants
+ args_num = len(variant.args) - j
+ if args_num in class_info.constructor_arg_num:
+                        # FIXME: workaround for constructor overload with same args number
+ # e.g. DescriptorMatcher
+ continue
+ class_info.constructor_arg_num.add(args_num)
+ binding_text = ctr_template.substitute(const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_template = overload_class_static_function_template if variant.is_class_method else \
+ overload_class_function_template
+ binding_text = binding_template.substitute(js_name=js_func_name,
+ const='' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_text = overload_function_template.substitute(js_name=js_func_name,
+ cpp_name=c_func_name+postfix,
+ const='const' if variant.is_const else '',
+ ret=ret_type,
+ args=', '.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+
+ bindings.append(binding_text)
+ wrappers.append(wrapper_func_text)
+
+ return [bindings, wrappers]
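+
+    # Illustration (not generator code): for a simplified whitelisted declaration
+    # 'void cv::blur(InputArray src, OutputArray dst, Size ksize)' the method
+    # above emits roughly
+    #
+    #   void blur_wrapper(const cv::Mat& arg1, cv::Mat& arg2, Size arg3) {
+    #       return cv::blur(arg1, arg2, arg3);
+    #   }
+    #
+    # plus the matching registration
+    #
+    #   function("blur", select_overload<void(const cv::Mat&, cv::Mat&, Size)>(&Wrappers::blur_wrapper));
+    #
+    # (the real cv::blur has default parameters, each of which adds a shorter
+    # wrapper variant registered under the same JS name)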
+
+
+ def gen_function_binding(self, func, class_info):
+
+        if class_info is not None:
+            func_name = class_info.cname + '::' + func.cname
+        else:
+            func_name = func.cname
+
+ binding_text = None
+ binding_text_list = []
+
+ for index, variant in enumerate(func.variants):
+ factory = False
+ #TODO if variant.is_class_method and variant.rettype == ('Ptr<' + class_info.name + '>'):
+        if (class_info is not None and variant.rettype == ('Ptr<' + class_info.name + '>')) or (func.name.startswith("create") and variant.rettype):
+ factory = True
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ if base_class_name in self.classes:
+ self.classes[base_class_name].has_smart_ptr = True
+ else:
+                    print("Generator warning: %s not found in classes for registering smart pointer, using %s instead"
+                          % (base_class_name, class_info.name))
+ self.classes[class_info.name].has_smart_ptr = True
+
+
+ # Return type
+ ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype
+
+ ret_type = ret_type.strip()
+
+ if ret_type.startswith('Ptr'): #smart pointer
+ ptr_type = ret_type.replace('Ptr<', '').replace('>', '')
+ if ptr_type in type_dict:
+ ret_type = type_dict[ptr_type]
+ for key in type_dict:
+ if key in ret_type:
+ ret_type = ret_type.replace(key, type_dict[key])
+
+            if variant.constret and not ret_type.startswith('const'):
+                ret_type = 'const ' + ret_type
+            if variant.refret and not ret_type.endswith('&'):
+ ret_type += '&'
+
+ arg_types = []
+ orig_arg_types = []
+ def_args = []
+ for arg in variant.args:
+ if arg.tp in type_dict:
+ arg_type = type_dict[arg.tp]
+ else:
+ arg_type = arg.tp
+
+ #if arg.outputarg:
+ # arg_type += '&'
+ orig_arg_types.append(arg_type)
+ if with_default_params and arg.defval != '':
+ def_args.append(arg.defval)
+ arg_types.append(orig_arg_types[-1])
+
+            # Function attribute
+ func_attribs = ''
+ if '*' in ''.join(orig_arg_types):
+ func_attribs += ', allow_raw_pointers()'
+
+ if variant.is_pure_virtual:
+ func_attribs += ', pure_virtual()'
+
+ #TODO better naming
+ #if variant.name in self.jsfunctions:
+ #else
+ js_func_name = variant.name
+
+
+            c_func_name = func.cname if (factory and not variant.is_class_method) else func_name
+
+
+ ################################### Binding
+ for j in range(0, len(def_args) + 1):
+ postfix = ''
+ if j > 0:
+                    postfix = '_' + str(j)
+ if factory:
+ binding_text = ctr_template.substitute(const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_template = overload_class_static_function_template if variant.is_class_method else \
+ overload_function_template if class_info == None else overload_class_function_template
+ binding_text = binding_template.substitute(js_name=js_func_name,
+ const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+                                                               args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+
+ binding_text_list.append(binding_text)
+
+ return binding_text_list
+
+ def print_decls(self, decls):
+ """
+        Prints the list of declarations retrieved by the parse() method
+ """
+ for d in decls:
+ print(d[0], d[1], ";".join(d[2]))
+ for a in d[3]:
+ print(" ", a[0], a[1], a[2], end="")
+ if a[3]:
+ print("; ".join(a[3]))
+ else:
+ print()
+
+ def gen(self, dst_file, src_files, core_bindings):
+ # step 1: scan the headers and extract classes, enums and functions
+ for hdr in src_files:
+ decls = self.parser.parse(hdr)
+ # print(hdr);
+ # self.print_decls(decls);
+ if len(decls) == 0:
+ continue
+ for decl in decls:
+ name = decl[0]
+ type = name[:name.find(" ")]
+ if type == "struct" or type == "class": # class/structure case
+ name = name[name.find(" ") + 1:].strip()
+ self.add_class(type, name, decl)
+ elif name.startswith("enum"): # enumerations
+ self.add_enum(decl)
+ elif name.startswith("const"):
+ # constant
+ self.add_const(name.replace("const ", "").strip(), decl)
+ else: # class/global function
+ self.add_func(decl)
+
+ # step 2: generate bindings
+ # Global functions
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, func in sorted(ns.funcs.items()):
+ if name in ignore_list:
+ continue
+                if name not in white_list['']:
+ continue
+
+ ext_cnst = False
+ # Check if the method is an external constructor
+ for variant in func.variants:
+ if "Ptr<" in variant.rettype:
+
+ # Register the smart pointer
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ self.classes[base_class_name].has_smart_ptr = True
+
+ # Adds the external constructor
+ class_name = func.name.replace("create", "")
+                        if class_name not in self.classes:
+ self.classes[base_class_name].methods[func.cname] = func
+ else:
+ self.classes[class_name].methods[func.cname] = func
+ ext_cnst = True
+ if ext_cnst:
+ continue
+
+ if with_wrapped_functions:
+ binding, wrapper = self.gen_function_binding_with_wrapper(func, class_info=None)
+ self.bindings += binding
+ self.wrapper_funcs += wrapper
+ else:
+ binding = self.gen_function_binding(func, class_info=None)
+ self.bindings+=binding
+
+ # generate code for the classes and their methods
+ class_list = list(self.classes.items())
+
+ for name, class_info in class_list:
+ class_bindings = []
+            if name not in white_list:
+ continue
+
+ # Generate bindings for methods
+            for method_name, method in class_info.methods.items():
+ if method.cname in ignore_list:
+ continue
+                if method.name not in white_list[method.class_name]:
+ continue
+ if method.is_constructor:
+ for variant in method.variants:
+ args = []
+ for arg in variant.args:
+ args.append(arg.tp)
+ # print('Constructor: ', class_info.name, len(variant.args))
+ args_num = len(variant.args)
+ if args_num in class_info.constructor_arg_num:
+ continue
+ class_info.constructor_arg_num.add(args_num)
+ class_bindings.append(constructor_template.substitute(signature=', '.join(args)))
+ else:
+                    if with_wrapped_functions and (len(method.variants) > 1 or len(method.variants[0].args) > 0 or "String" in method.variants[0].rettype):
+ binding, wrapper = self.gen_function_binding_with_wrapper(method, class_info=class_info)
+ self.wrapper_funcs = self.wrapper_funcs + wrapper
+ class_bindings = class_bindings + binding
+ else:
+ binding = self.gen_function_binding(method, class_info=class_info)
+ class_bindings = class_bindings + binding
+
+            # Register smart pointer
+ if class_info.has_smart_ptr:
+ class_bindings.append(smart_ptr_reg_template.substitute(cname=class_info.cname, name=class_info.name))
+
+ # Attach external constructors
+ # for method_name, method in class_info.ext_constructors.iteritems():
+ # print("ext constructor", method_name)
+ #if class_info.ext_constructors:
+
+
+
+ # Generate bindings for properties
+            for prop in class_info.props:
+                class_bindings.append(class_property_template.substitute(js_name=prop.name, cpp_name='::'.join(
+                    [class_info.cname, prop.name])))
+
+ dv = ''
+ base = Template("""base<$base$isPoly>""")
+
+            assert len(class_info.bases) <= 1, "multiple inheritance not supported"
+
+ if len(class_info.bases) == 1:
+ dv = "," + base.substitute(base=', '.join(class_info.bases),
+                                           isPoly=", true" if class_info.name == "Feature2D" else "")
+
+ self.bindings.append(class_template.substitute(cpp_name=class_info.cname,
+ js_name=name,
+ class_templates=''.join(class_bindings),
+ derivation=dv))
+
+ if export_enums:
+            # step 3: generate bindings for enums
+ # TODO anonymous enums are ignored for now.
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, enum in sorted(ns.enums.items()):
+ if not name.endswith('.anonymous'):
+ name = name.replace("cv.", "")
+ enum_values = []
+ for enum_val in enum:
+ value = enum_val[0][enum_val[0].rfind(".")+1:]
+ enum_values.append(enum_item_template.substitute(val=value,
+ cpp_val=name.replace('.', '::')+'::'+value))
+
+ self.bindings.append(enum_template.substitute(cpp_name=name.replace(".", "::"),
+ js_name=name.replace(".", "_"),
+ enum_items=''.join(enum_values)))
+ else:
+ print(name)
+ #TODO: represent anonymous enums with constants
+
+ if export_consts:
+            # step 4: generate bindings for consts
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, const in sorted(ns.consts.items()):
+ # print("Gen consts: ", name, const)
+ self.bindings.append(const_template.substitute(js_name=name, value=const))
+
+ with open(core_bindings) as f:
+ ret = f.read()
+
+ defis = '\n'.join(self.wrapper_funcs)
+ ret += wrapper_codes_template.substitute(ns=wrapper_namespace, defs=defis)
+ ret += emscripten_binding_template.substitute(binding_name='testBinding', bindings=''.join(self.bindings))
+
+
+ # print(ret)
+ text_file = open(dst_file, "w")
+ text_file.write(ret)
+ text_file.close()
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 5:
+ print("Usage:\n", \
+ os.path.basename(sys.argv[0]), \
+ "<full path to hdr_parser.py> <bindings.cpp> <headers.txt> <core_bindings.cpp>")
+ print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv]))
+ exit(0)
+
+ dstdir = "."
+ hdr_parser_path = os.path.abspath(sys.argv[1])
+ if hdr_parser_path.endswith(".py"):
+ hdr_parser_path = os.path.dirname(hdr_parser_path)
+ sys.path.append(hdr_parser_path)
+ import hdr_parser
+
+ bindingsCpp = sys.argv[2]
+ headers = open(sys.argv[3], 'r').read().split(';')
+ coreBindings = sys.argv[4]
+ generator = JSWrapperGenerator()
+ generator.gen(bindingsCpp, headers, coreBindings)
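
To see the generated API from the caller's side: each trailing default argument yields an additional registered arity under the same JS name, so a whitelisted function can be invoked with or without its optional parameters. A sketch, assuming a built opencv.js loaded as `cv` plus the imread/imshow helpers defined later in this patch:

    let src = cv.imread('canvasInput');
    let dst = new cv.Mat();
    // full argument list ...
    cv.GaussianBlur(src, dst, new cv.Size(5, 5), 0, 0, cv.BORDER_DEFAULT);
    // ... or let the generated default-parameter variants fill in the tail
    cv.GaussianBlur(src, dst, new cv.Size(5, 5), 0);
    cv.imshow('canvasOutput', dst);
    src.delete(); dst.delete();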
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+Module['imread'] = function(imageSource) {
+ var img = null;
+ if (typeof imageSource === 'string') {
+ img = document.getElementById(imageSource);
+ } else {
+ img = imageSource;
+ }
+ var canvas = null;
+ var ctx = null;
+ if (img instanceof HTMLImageElement) {
+ canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0, img.width, img.height);
+ } else if (img instanceof HTMLCanvasElement) {
+ canvas = img;
+ ctx = canvas.getContext('2d');
+ } else {
+    throw new Error('Please provide a valid canvas or img element, or an element id.');
+ }
+
+ var imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ return cv.matFromImageData(imgData);
+};
+
+Module['imshow'] = function(canvasSource, mat) {
+ var canvas = null;
+ if (typeof canvasSource === 'string') {
+ canvas = document.getElementById(canvasSource);
+ } else {
+ canvas = canvasSource;
+ }
+ if (!(canvas instanceof HTMLCanvasElement)) {
+    throw new Error('Please provide a valid canvas element or id.');
+ }
+ if (!(mat instanceof cv.Mat)) {
+    throw new Error('Please provide a valid cv.Mat instance.');
+ }
+
+  // convert the mat to cv.CV_8U: 8-bit data passes through unchanged, wider
+  // integer types are scaled down by a factor of 256, floating point is scaled
+  // up by 255, and signed types are shifted into the unsigned range
+  var img = new cv.Mat();
+  var depth = mat.type() % 8;
+  var scale = depth <= cv.CV_8S ? 1.0 : (depth <= cv.CV_32S ? 1.0 / 256.0 : 255.0);
+  var shift = (depth === cv.CV_8S || depth === cv.CV_16S) ? 128.0 : 0.0;
+ mat.convertTo(img, cv.CV_8U, scale, shift);
+
+ // convert the img type to cv.CV_8UC4
+ switch (img.type()) {
+ case cv.CV_8UC1:
+ cv.cvtColor(img, img, cv.COLOR_GRAY2RGBA);
+ break;
+ case cv.CV_8UC3:
+ cv.cvtColor(img, img, cv.COLOR_RGB2RGBA);
+ break;
+ case cv.CV_8UC4:
+ break;
+ default:
+ throw new Error('Bad number of channels (Source image must have 1, 3 or 4 channels)');
+ }
+ var imgData = new ImageData(new Uint8ClampedArray(img.data), img.cols, img.rows);
+ var ctx = canvas.getContext('2d');
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ canvas.width = imgData.width;
+ canvas.height = imgData.height;
+ ctx.putImageData(imgData, 0, 0);
+ img.delete();
+};
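+
+// Usage sketch (illustrative): read pixels from an <img> or canvas, process,
+// and render the result back to a canvas. Assumes elements with ids 'imageSrc'
+// and 'canvasOutput' exist on the page:
+//
+//   let src = cv.imread('imageSrc');
+//   let dst = new cv.Mat();
+//   cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+//   cv.imshow('canvasOutput', dst);
+//   src.delete(); dst.delete();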
+
+Module['VideoCapture'] = function(videoSource) {
+ var video = null;
+ if (typeof videoSource === 'string') {
+ video = document.getElementById(videoSource);
+ } else {
+ video = videoSource;
+ }
+ if (!(video instanceof HTMLVideoElement)) {
+    throw new Error('Please provide a valid video element or id.');
+ }
+ var canvas = document.createElement('canvas');
+ canvas.width = video.width;
+ canvas.height = video.height;
+ var ctx = canvas.getContext('2d');
+ this.video = video;
+ this.read = function(frame) {
+ if (!(frame instanceof cv.Mat)) {
+      throw new Error('Please provide a valid cv.Mat instance.');
+ }
+ if (frame.type() !== cv.CV_8UC4) {
+ throw new Error('Bad type of input mat: the type should be cv.CV_8UC4.');
+ }
+ if (frame.cols !== video.width || frame.rows !== video.height) {
+ throw new Error('Bad size of input mat: the size should be same as the video.');
+ }
+ ctx.drawImage(video, 0, 0, video.width, video.height);
+ frame.data.set(ctx.getImageData(0, 0, video.width, video.height).data);
+ };
+};
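+
+// Usage sketch (illustrative): grab frames from a playing <video> element in an
+// animation loop. Assumes an element with id 'videoInput':
+//
+//   let video = document.getElementById('videoInput');
+//   let cap = new cv.VideoCapture(video);
+//   let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+//   function processFrame() {
+//     cap.read(frame);
+//     // ... process frame ...
+//     requestAnimationFrame(processFrame);
+//   }
+//   requestAnimationFrame(processFrame);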
+
+function Range(start, end) {
+ this.start = typeof(start) === 'undefined' ? 0 : start;
+ this.end = typeof(end) === 'undefined' ? 0 : end;
+}
+
+Module['Range'] = Range;
+
+function Point(x, y) {
+ this.x = typeof(x) === 'undefined' ? 0 : x;
+ this.y = typeof(y) === 'undefined' ? 0 : y;
+}
+
+Module['Point'] = Point;
+
+function Size(width, height) {
+ this.width = typeof(width) === 'undefined' ? 0 : width;
+ this.height = typeof(height) === 'undefined' ? 0 : height;
+}
+
+Module['Size'] = Size;
+
+function Rect() {
+ switch (arguments.length) {
+ case 0: {
+ // new cv.Rect()
+ this.x = 0;
+ this.y = 0;
+ this.width = 0;
+ this.height = 0;
+ break;
+ }
+ case 1: {
+ // new cv.Rect(rect)
+ var rect = arguments[0];
+ this.x = rect.x;
+ this.y = rect.y;
+ this.width = rect.width;
+ this.height = rect.height;
+ break;
+ }
+ case 2: {
+ // new cv.Rect(point, size)
+ var point = arguments[0];
+ var size = arguments[1];
+ this.x = point.x;
+ this.y = point.y;
+ this.width = size.width;
+ this.height = size.height;
+ break;
+ }
+ case 4: {
+ // new cv.Rect(x, y, width, height)
+ this.x = arguments[0];
+ this.y = arguments[1];
+ this.width = arguments[2];
+ this.height = arguments[3];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['Rect'] = Rect;
+
+function RotatedRect() {
+ switch (arguments.length) {
+ case 0: {
+ this.center = {x: 0, y: 0};
+ this.size = {width: 0, height: 0};
+ this.angle = 0;
+ break;
+ }
+ case 3: {
+ this.center = arguments[0];
+ this.size = arguments[1];
+ this.angle = arguments[2];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+RotatedRect.points = function(obj) {
+ return Module.rotatedRectPoints(obj);
+};
+
+RotatedRect.boundingRect = function(obj) {
+ return Module.rotatedRectBoundingRect(obj);
+};
+
+RotatedRect.boundingRect2f = function(obj) {
+ return Module.rotatedRectBoundingRect2f(obj);
+};
+
+Module['RotatedRect'] = RotatedRect;
+
+function Scalar(v0, v1, v2, v3) {
+ this.push(typeof(v0) === 'undefined' ? 0 : v0);
+ this.push(typeof(v1) === 'undefined' ? 0 : v1);
+ this.push(typeof(v2) === 'undefined' ? 0 : v2);
+ this.push(typeof(v3) === 'undefined' ? 0 : v3);
+}
+
+Scalar.prototype = new Array; // eslint-disable-line no-array-constructor
+
+Scalar.all = function(v) {
+ return new Scalar(v, v, v, v);
+};
+
+Module['Scalar'] = Scalar;
+
+function MinMaxLoc() {
+ switch (arguments.length) {
+ case 0: {
+ this.minVal = 0;
+ this.maxVal = 0;
+ this.minLoc = new Point();
+ this.maxLoc = new Point();
+ break;
+ }
+ case 4: {
+ this.minVal = arguments[0];
+ this.maxVal = arguments[1];
+ this.minLoc = arguments[2];
+ this.maxLoc = arguments[3];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['MinMaxLoc'] = MinMaxLoc;
+
+function Circle() {
+ switch (arguments.length) {
+ case 0: {
+ this.center = new Point();
+ this.radius = 0;
+ break;
+ }
+ case 2: {
+ this.center = arguments[0];
+ this.radius = arguments[1];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['Circle'] = Circle;
+
+function TermCriteria() {
+ switch (arguments.length) {
+ case 0: {
+ this.type = 0;
+ this.maxCount = 0;
+ this.epsilon = 0;
+ break;
+ }
+ case 3: {
+ this.type = arguments[0];
+ this.maxCount = arguments[1];
+ this.epsilon = arguments[2];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['TermCriteria'] = TermCriteria;
+
+Module['matFromArray'] = function(rows, cols, type, array) {
+ var mat = new cv.Mat(rows, cols, type);
+ switch (type) {
+ case cv.CV_8U:
+ case cv.CV_8UC1:
+ case cv.CV_8UC2:
+ case cv.CV_8UC3:
+ case cv.CV_8UC4: {
+ mat.data.set(array);
+ break;
+ }
+ case cv.CV_8S:
+ case cv.CV_8SC1:
+ case cv.CV_8SC2:
+ case cv.CV_8SC3:
+ case cv.CV_8SC4: {
+ mat.data8S.set(array);
+ break;
+ }
+ case cv.CV_16U:
+ case cv.CV_16UC1:
+ case cv.CV_16UC2:
+ case cv.CV_16UC3:
+ case cv.CV_16UC4: {
+ mat.data16U.set(array);
+ break;
+ }
+ case cv.CV_16S:
+ case cv.CV_16SC1:
+ case cv.CV_16SC2:
+ case cv.CV_16SC3:
+ case cv.CV_16SC4: {
+ mat.data16S.set(array);
+ break;
+ }
+ case cv.CV_32S:
+ case cv.CV_32SC1:
+ case cv.CV_32SC2:
+ case cv.CV_32SC3:
+ case cv.CV_32SC4: {
+ mat.data32S.set(array);
+ break;
+ }
+ case cv.CV_32F:
+ case cv.CV_32FC1:
+ case cv.CV_32FC2:
+ case cv.CV_32FC3:
+ case cv.CV_32FC4: {
+ mat.data32F.set(array);
+ break;
+ }
+ case cv.CV_64F:
+ case cv.CV_64FC1:
+ case cv.CV_64FC2:
+ case cv.CV_64FC3:
+ case cv.CV_64FC4: {
+ mat.data64F.set(array);
+ break;
+ }
+ default: {
+ throw new Error('Type is unsupported');
+ }
+ }
+ return mat;
+};
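+
+// e.g. a 2x2 single-channel byte matrix:
+//   let m = cv.matFromArray(2, 2, cv.CV_8UC1, [1, 2, 3, 4]);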
+
+Module['matFromImageData'] = function(imageData) {
+ var mat = new cv.Mat(imageData.height, imageData.width, cv.CV_8UC4);
+ mat.data.set(imageData.data);
+ return mat;
+};
--- /dev/null
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+###############################################################################
+import sys
+
+def make_umd(opencvjs, cvjs):
+    # text mode and context managers, so the script also runs under Python 3
+    with open(opencvjs, 'r') as src, open(cvjs, 'w') as dst:
+        content = src.read()
+        # inspired by https://github.com/umdjs/umd/blob/95563fd6b46f06bda0af143ff67292e7f6ede6b7/templates/returnExportsGlobal.js
+        dst.write(("""
+(function (root, factory) {
+ if (typeof define === 'function' && define.amd) {
+ // AMD. Register as an anonymous module.
+ define(function () {
+ return (root.cv = factory());
+ });
+ } else if (typeof module === 'object' && module.exports) {
+ // Node. Does not work with strict CommonJS, but
+ // only CommonJS-like environments that support module.exports,
+ // like Node.
+ module.exports = factory();
+ } else {
+ // Browser globals
+ root.cv = factory();
+ }
+}(this, function () {
+ %s
+ if (typeof Module === 'undefined')
+ Module = {};
+ return cv(Module);
+}));
+ """ % (content)).lstrip())
+
+if __name__ == "__main__":
+ if len(sys.argv) > 2:
+ opencvjs = sys.argv[1]
+ cvjs = sys.argv[2]
+        make_umd(opencvjs, cvjs)
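
For context, a sketch of how the UMD-wrapped build is consumed (the output name cv.js and the readiness hook follow Emscripten's Module conventions and are assumptions here):

    // Node / CommonJS; in a browser, <script src="cv.js"></script> exposes the
    // same object as the global `cv`
    const cv = require('./cv.js');
    cv.onRuntimeInitialized = function() {
        console.log(cv.getBuildInformation());  // bound in core_bindings.cpp above
    };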
--- /dev/null
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+##############################################################################
+
+from string import Template
+
+wrapper_codes_template = Template("namespace $ns {\n$defs\n}")
+
+call_template = Template("""$func($args)""")
+class_call_template = Template("""$obj.$func($args)""")
+static_class_call_template = Template("""$scope$func($args)""")
+
+wrapper_function_template = Template(""" $ret_val $func($signature)$const {
+ return $cpp_call;
+ }
+ """)
+
+wrapper_function_with_def_args_template = Template(""" $ret_val $func($signature)$const {
+ $check_args
+ }
+ """)
+
+wrapper_overload_def_values = [
+ Template("""return $cpp_call;"""), Template("""if ($arg0.isUndefined())
+ return $cpp_call;
+ else
+ $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined() )
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined() && $arg6.isUndefined() )
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined() &&
+ $arg8.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined()&&
+ $arg8.isUndefined() && $arg9.isUndefined())
+ return $cpp_call;
+ else $next""")]
+
+emscripten_binding_template = Template("""
+
+EMSCRIPTEN_BINDINGS($binding_name) {$bindings
+}
+""")
+
+simple_function_template = Template("""
+ emscripten::function("$js_name", &$cpp_name);
+""")
+
+smart_ptr_reg_template = Template("""
+ .smart_ptr<Ptr<$cname>>("Ptr<$name>")
+""")
+
+overload_function_template = Template("""
+ function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional);
+""")
+
+overload_class_function_template = Template("""
+ .function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+overload_class_static_function_template = Template("""
+ .class_function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+class_property_template = Template("""
+ .property("$js_name", &$cpp_name)""")
+
+ctr_template = Template("""
+ .constructor(select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+smart_ptr_ctr_overload_template = Template("""
+ .smart_ptr_constructor("$ptr_type", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+function_template = Template("""
+ .function("$js_name", &$cpp_name)""")
+
+static_function_template = Template("""
+ .class_function("$js_name", &$cpp_name)""")
+
+constructor_template = Template("""
+ .constructor<$signature>()""")
+
+enum_item_template = Template("""
+ .value("$val", $cpp_val)""")
+
+enum_template = Template("""
+ emscripten::enum_<$cpp_name>("$js_name")$enum_items;
+""")
+
+const_template = Template("""
+ constant("$js_name", +$value);
+""")
+
+vector_template = Template("""
+ emscripten::register_vector<$cType>("$js_name");
+""")
+
+map_template = Template("""
+ emscripten::register_map<cpp_type_key,$cpp_type_val>("$js_name");
+""")
+
+class_template = Template("""
+ emscripten::class_<$cpp_name $derivation>("$js_name")$class_templates;
+""")
--- /dev/null
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "CallExpression": {"arguments": "first"}}]
+ }
+}
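This configuration extends the Google preset and pins three project rules: 100-column lines, single quotes, and 4-space indentation with call arguments aligned to the first argument. An illustrative snippet that satisfies all three (the helper name is made up):

    function logMatShape(mat) {
        console.log('rows: ' + mat.rows,
                    'cols: ' + mat.cols);
    }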
--- /dev/null
+{
+ "name": "opencv_js_tests",
+ "description": "Tests for opencv js bindings",
+ "version": "1.0.0",
+ "dependencies" : {
+ "qunit" : "latest"
+ },
+ "devDependencies": {
+ "eslint" : "latest",
+ "eslint-config-google" : "latest"
+ },
+ "scripts": {
+ "test": "node tests.js"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/opencv/opencv.git"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "BSD-4-Clause",
+ "bugs": {
+ "url": "https://github.com/opencv/opencv/issues"
+ },
+ "homepage": "https://github.com/opencv/opencv"
+}
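With this manifest in place, the expected workflow is presumably npm install (pulling QUnit plus ESLint and the Google config) followed by npm test, which runs node tests.js. The require('./opencv.js') calls in the test files below additionally assume a built opencv.js in the same directory.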
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+    // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Image Processing', {});
+
+QUnit.test('test_imgProc', function(assert) {
+ // calcHist
+ {
+ let vec1 = new cv.Mat.ones(new cv.Size(20, 20), cv.CV_8UC1); // eslint-disable-line new-cap
+ let source = new cv.MatVector();
+ source.push_back(vec1);
+ let channels = [0];
+ let histSize = [256];
+        let ranges = [0, 256];
+
+ let hist = new cv.Mat();
+ let mask = new cv.Mat();
+ let binSize = cv._malloc(4);
+ let binView = new Int32Array(cv.HEAP8.buffer, binSize);
+ binView[0] = 10;
+ cv.calcHist(source, channels, mask, hist, histSize, ranges, false);
+
+        // hist should contain an N x 1 array.
+ let size = hist.size();
+ assert.equal(size.height, 256);
+ assert.equal(size.width, 1);
+
+ // default parameters
+ cv.calcHist(source, channels, mask, hist, histSize, ranges);
+ size = hist.size();
+ assert.equal(size.height, 256);
+ assert.equal(size.width, 1);
+
+ // Do we need to verify data in histogram?
+ // let dataView = hist.data;
+
+        // Free resources.
+        cv._free(binSize);
+        mask.delete();
+        hist.delete();
+        source.delete();
+        vec1.delete();
+    }
+
+ // cvtColor
+ {
+ let source = new cv.Mat(10, 10, cv.CV_8UC3);
+ let dest = new cv.Mat();
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2GRAY, 0);
+ assert.equal(dest.channels(), 1);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2GRAY);
+ assert.equal(dest.channels(), 1);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2BGRA, 0);
+ assert.equal(dest.channels(), 4);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2BGRA);
+ assert.equal(dest.channels(), 4);
+
+ dest.delete();
+ source.delete();
+ }
+ // equalizeHist
+ {
+ let source = new cv.Mat(10, 10, cv.CV_8UC1);
+ let dest = new cv.Mat();
+
+ cv.equalizeHist(source, dest);
+
+        // equalizeHist changes the content of an image, but does not alter
+        // its metadata.
+ assert.equal(source.channels(), dest.channels());
+ assert.equal(source.type(), dest.type());
+
+ dest.delete();
+ source.delete();
+ }
+});
+
+QUnit.test('test_segmentation', function(assert) {
+ const THRESHOLD = 127.0;
+ const THRESHOLD_MAX = 210.0;
+
+ // threshold
+ {
+ let source = new cv.Mat(1, 5, cv.CV_8UC1);
+ let sourceView = source.data;
+ sourceView[0] = 0; // < threshold
+ sourceView[1] = 100; // < threshold
+ sourceView[2] = 200; // > threshold
+
+ let dest = new cv.Mat();
+
+ cv.threshold(source, dest, THRESHOLD, THRESHOLD_MAX, cv.THRESH_BINARY);
+
+ let destView = dest.data;
+ assert.equal(destView[0], 0);
+ assert.equal(destView[1], 0);
+ assert.equal(destView[2], THRESHOLD_MAX);
+        source.delete();
+        dest.delete();
+    }
+
+ // adaptiveThreshold
+ {
+ let source = cv.Mat.zeros(1, 5, cv.CV_8UC1);
+ let sourceView = source.data;
+ sourceView[0] = 50;
+ sourceView[1] = 150;
+ sourceView[2] = 200;
+
+ let dest = new cv.Mat();
+ const C = 0;
+ const blockSize = 3;
+ cv.adaptiveThreshold(source, dest, THRESHOLD_MAX,
+ cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, blockSize, C);
+
+ let destView = dest.data;
+ assert.equal(destView[0], 0);
+ assert.equal(destView[1], THRESHOLD_MAX);
+ assert.equal(destView[2], THRESHOLD_MAX);
+        source.delete();
+        dest.delete();
+    }
+});
+
+QUnit.test('test_shape', function(assert) {
+ // moments
+ {
+ let points = new cv.Mat(1, 4, cv.CV_32SC2);
+ let data32S = points.data32S;
+        data32S[0] = 50;
+        data32S[1] = 56;
+        data32S[2] = 53;
+        data32S[3] = 53;
+        data32S[4] = 46;
+        data32S[5] = 54;
+        data32S[6] = 49;
+        data32S[7] = 51;
+
+ let m = cv.moments(points, false);
+ let area = cv.contourArea(points, false);
+
+ assert.equal(m.m00, 0);
+ assert.equal(m.m01, 0);
+ assert.equal(m.m10, 0);
+ assert.equal(area, 0);
+
+ // default parameters
+ m = cv.moments(points);
+ area = cv.contourArea(points);
+ assert.equal(m.m00, 0);
+ assert.equal(m.m01, 0);
+ assert.equal(m.m10, 0);
+ assert.equal(area, 0);
+
+ points.delete();
+ }
+});
+
+QUnit.test('test_min_enclosing', function(assert) {
+ {
+ let points = new cv.Mat(4, 1, cv.CV_32FC2);
+
+ points.data32F[0] = 0;
+ points.data32F[1] = 0;
+ points.data32F[2] = 1;
+ points.data32F[3] = 0;
+ points.data32F[4] = 1;
+ points.data32F[5] = 1;
+ points.data32F[6] = 0;
+ points.data32F[7] = 1;
+
+ let circle = cv.minEnclosingCircle(points);
+
+ assert.deepEqual(circle.center, {x: 0.5, y: 0.5});
+ assert.ok(Math.abs(circle.radius - Math.sqrt(2) / 2) < 0.001);
+
+ points.delete();
+ }
+});
+
+QUnit.test('test_filter', function(assert) {
+ // blur
+ {
+ let mat1 = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.blur(mat1, mat2, {height: 3, width: 3}, {x: -1, y: -1}, cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ cv.blur(mat1, mat2, {height: 3, width: 3}, {x: -1, y: -1});
+
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ cv.blur(mat1, mat2, {height: 3, width: 3});
+
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // GaussianBlur
+ {
+ let mat1 = cv.Mat.ones(7, 7, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+
+ cv.GaussianBlur(mat1, mat2, new cv.Size(3, 3), 0, 0, // eslint-disable-line new-cap
+ cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 7);
+ assert.equal(size.width, 7);
+        mat1.delete();
+        mat2.delete();
+    }
+
+ // medianBlur
+ {
+ let mat1 = cv.Mat.ones(9, 9, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.medianBlur(mat1, mat2, 3);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 9);
+ assert.equal(size.width, 9);
+        mat1.delete();
+        mat2.delete();
+    }
+
+ // Transpose
+ {
+ let mat1 = cv.Mat.eye(9, 9, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.transpose(mat1, mat2);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 9);
+ assert.equal(size.width, 9);
+        mat1.delete();
+        mat2.delete();
+    }
+
+ // bilateralFilter
+ {
+ let mat1 = cv.Mat.ones(11, 11, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.bilateralFilter(mat1, mat2, 3, 6, 1.5, cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+
+ // default parameters
+ cv.bilateralFilter(mat1, mat2, 3, 6, 1.5);
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // Watershed
+ {
+ let mat = cv.Mat.ones(11, 11, cv.CV_8UC3);
+ let out = new cv.Mat(11, 11, cv.CV_32SC1);
+
+ cv.watershed(mat, out);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+ mat.delete();
+ out.delete();
+ }
+
+ // Concat
+ {
+ let mat = cv.Mat.ones({height: 10, width: 5}, cv.CV_8UC3);
+ let mat2 = cv.Mat.eye({height: 10, width: 5}, cv.CV_8UC3);
+ let mat3 = cv.Mat.eye({height: 10, width: 5}, cv.CV_8UC3);
+
+
+ let out = new cv.Mat();
+ let input = new cv.MatVector();
+ input.push_back(mat);
+ input.push_back(mat2);
+ input.push_back(mat3);
+
+ cv.vconcat(input, out);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 3);
+ assert.equal(size.height, 30);
+ assert.equal(size.width, 5);
+ assert.equal(out.elemSize1(), 1);
+
+ cv.hconcat(input, out);
+
+ // Verify result.
+ size = out.size();
+ assert.equal(out.channels(), 3);
+ assert.equal(size.height, 10);
+ assert.equal(size.width, 15);
+ assert.equal(out.elemSize1(), 1);
+
+        mat.delete();
+        mat2.delete();
+        mat3.delete();
+        input.delete();
+        out.delete();
+    }
+
+
+    // distanceTransform variants
+ {
+ let mat = cv.Mat.ones(11, 11, cv.CV_8UC1);
+ let out = new cv.Mat(11, 11, cv.CV_32FC1);
+ let labels = new cv.Mat(11, 11, cv.CV_32FC1);
+ const maskSize = 3;
+ cv.distanceTransform(mat, out, cv.DIST_L2, maskSize, cv.CV_32F);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+
+ cv.distanceTransformWithLabels(mat, out, labels, cv.DIST_L2, maskSize,
+ cv.DIST_LABEL_CCOMP);
+
+ // Verify result.
+ size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+ size = labels.size();
+ assert.equal(labels.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(labels.elemSize1(), 4);
+
+ mat.delete();
+ out.delete();
+ labels.delete();
+ }
+
+ // Min, Max
+ {
+ let data1 = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ let data2 = new Uint8Array([0, 4, 0, 8, 0, 12, 0, 16, 0]);
+
+ let expectedMin = new Uint8Array([0, 2, 0, 4, 0, 6, 0, 8, 0]);
+ let expectedMax = new Uint8Array([1, 4, 3, 8, 5, 12, 7, 16, 9]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+
+ let mat3 = new cv.Mat();
+
+ cv.min(mat1, mat2, mat3);
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedMin);
+
+
+ cv.max(mat1, mat2, mat3);
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedMax);
+
+        mat1.delete();
+        mat2.delete();
+        mat3.delete();
+        cv._free(dataPtr);
+        cv._free(dataPtr2);
+    }
+
+ // Bitwise operations
+ {
+ let data1 = new Uint8Array([0, 1, 2, 4, 8, 16, 32, 64, 128]);
+ let data2 = new Uint8Array([255, 255, 255, 255, 255, 255, 255, 255, 255]);
+
+ let expectedAnd = new Uint8Array([0, 1, 2, 4, 8, 16, 32, 64, 128]);
+ let expectedOr = new Uint8Array([255, 255, 255, 255, 255, 255, 255, 255, 255]);
+ let expectedXor = new Uint8Array([255, 254, 253, 251, 247, 239, 223, 191, 127]);
+
+ let expectedNot = new Uint8Array([255, 254, 253, 251, 247, 239, 223, 191, 127]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+
+ let mat3 = new cv.Mat();
+ let none = new cv.Mat();
+
+ cv.bitwise_not(mat1, mat3, none);
+ // Verify result.
+ let size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedNot);
+
+ cv.bitwise_and(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedAnd);
+
+
+ cv.bitwise_or(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedOr);
+
+ cv.bitwise_xor(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedXor);
+
+        mat1.delete();
+        mat2.delete();
+        mat3.delete();
+        none.delete();
+        cv._free(dataPtr);
+        cv._free(dataPtr2);
+    }
+
+    // Arithmetic operations
+ {
+ let data1 = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let data2 = new Uint8Array([0, 2, 4, 6, 8, 10, 12, 14, 16]);
+ let data3 = new Uint8Array([0, 1, 0, 1, 0, 1, 0, 1, 0]);
+
+ // |data1 - data2|
+ let expectedAbsDiff = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let expectedAdd = new Uint8Array([0, 3, 6, 9, 12, 15, 18, 21, 24]);
+
+ const alpha = 4;
+ const beta = -1;
+ const gamma = 3;
+ // 4*data1 - data2 + 3
+ let expectedWeightedAdd = new Uint8Array([3, 5, 7, 9, 11, 13, 15, 17, 19]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+ let dataPtr3 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+ let dataHeap3 = new Uint8Array(cv.HEAPU8.buffer, dataPtr3, 3*3*1);
+ dataHeap3.set(new Uint8Array(data3.buffer));
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+ let mat3 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr3, 0);
+
+ let dst = new cv.Mat();
+ let none = new cv.Mat();
+
+ cv.absdiff(mat1, mat2, dst);
+ // Verify result.
+ let size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedAbsDiff);
+
+ cv.add(mat1, mat2, dst, none, -1);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedAdd);
+
+ cv.addWeighted(mat1, alpha, mat2, beta, gamma, dst, -1);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedWeightedAdd);
+
+ // default parameter
+ cv.addWeighted(mat1, alpha, mat2, beta, gamma, dst);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedWeightedAdd);
+
+ mat1.delete();
+ mat2.delete();
+ mat3.delete();
+ dst.delete();
+ none.delete();
+ }
+
+    // Integral variants
+ {
+ let mat = cv.Mat.eye({height: 100, width: 100}, cv.CV_8UC3);
+ let sum = new cv.Mat();
+ let sqSum = new cv.Mat();
+        let tilted = new cv.Mat(); // tilted integral output (unused here)
+
+ cv.integral(mat, sum, -1);
+
+ // Verify result.
+ let size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ cv.integral2(mat, sum, sqSum, -1, -1);
+ // Verify result.
+ size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ size = sqSum.size();
+ assert.equal(sqSum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ mat.delete();
+ sum.delete();
+ sqSum.delete();
+        tilted.delete();
+ }
+
+ // Invert
+ {
+ let inv1 = new cv.Mat();
+ let inv2 = new cv.Mat();
+ let inv3 = new cv.Mat();
+ let inv4 = new cv.Mat();
+
+
+ let data1 = new Float32Array([1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1]);
+ let data2 = new Float32Array([0, 0, 0,
+ 0, 5, 0,
+ 0, 0, 0]);
+ let data3 = new Float32Array([1, 1, 1, 0,
+ 0, 3, 1, 2,
+ 2, 3, 1, 0,
+ 1, 0, 2, 1]);
+ let data4 = new Float32Array([1, 4, 5,
+ 4, 2, 2,
+ 5, 2, 2]);
+
+ let expected1 = new Float32Array([1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1]);
+        // The inverse of data2 does not exist (singular matrix), so there is no expected2.
+ let expected3 = new Float32Array([-3, -1/2, 3/2, 1,
+ 1, 1/4, -1/4, -1/2,
+ 3, 1/4, -5/4, -1/2,
+ -3, 0, 1, 1]);
+ let expected4 = new Float32Array([0, -1, 1,
+ -1, 23/2, -9,
+ 1, -9, 7]);
+
+ let dataPtr1 = cv._malloc(3*3*4);
+ let dataPtr2 = cv._malloc(3*3*4);
+ let dataPtr3 = cv._malloc(4*4*4);
+ let dataPtr4 = cv._malloc(3*3*4);
+
+    let dataHeap = new Float32Array(cv.HEAPF32.buffer, dataPtr1, 3*3);
+    dataHeap.set(new Float32Array(data1.buffer));
+    let dataHeap2 = new Float32Array(cv.HEAPF32.buffer, dataPtr2, 3*3);
+    dataHeap2.set(new Float32Array(data2.buffer));
+    let dataHeap3 = new Float32Array(cv.HEAPF32.buffer, dataPtr3, 4*4);
+    dataHeap3.set(new Float32Array(data3.buffer));
+    let dataHeap4 = new Float32Array(cv.HEAPF32.buffer, dataPtr4, 3*3);
+    dataHeap4.set(new Float32Array(data4.buffer));
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr1, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr2, 0);
+ let mat3 = new cv.Mat(4, 4, cv.CV_32FC1, dataPtr3, 0);
+ let mat4 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr4, 0);
+
+    // Element-wise comparison of two array-like values within a tolerance;
+    // pushes one assertion result per element.
+    QUnit.assert.deepEqualWithTolerance = function(value, expected, tolerance) {
+        for (let i = 0; i < value.length; i++) {
+            this.pushResult({
+                result: Math.abs(value[i] - expected[i]) < tolerance,
+                actual: value[i],
+                expected: expected[i],
+            });
+        }
+    };
+
+    // The third argument selects the decomposition method: 0 = DECOMP_LU,
+    // 1 = DECOMP_SVD, 2 = DECOMP_EIG, 3 = DECOMP_CHOLESKY.
+    cv.invert(mat1, inv1, 0);
+ // Verify result.
+ let size = inv1.size();
+ assert.equal(inv1.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+ assert.deepEqualWithTolerance(inv1.data32F, expected1, 0.0001);
+
+
+    cv.invert(mat2, inv2, 0);
+    // mat2 is singular, so its inverse does not exist; there is no result to verify.
+
+ cv.invert(mat3, inv3, 0);
+ // Verify result.
+ size = inv3.size();
+ assert.equal(inv3.channels(), 1);
+ assert.equal(size.height, 4);
+ assert.equal(size.width, 4);
+ assert.deepEqualWithTolerance(inv3.data32F, expected3, 0.0001);
+
+ cv.invert(mat3, inv3, 1);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv3.data32F, expected3, 0.0001);
+
+ cv.invert(mat4, inv4, 2);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv4.data32F, expected4, 0.0001);
+
+ cv.invert(mat4, inv4, 3);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv4.data32F, expected4, 0.0001);
+
+ mat1.delete();
+ mat2.delete();
+ mat3.delete();
+ mat4.delete();
+ inv1.delete();
+ inv2.delete();
+ inv3.delete();
+ inv4.delete();
+    cv._free(dataPtr1);
+    cv._free(dataPtr2);
+    cv._free(dataPtr3);
+    cv._free(dataPtr4);
+    }
+});
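A pattern repeated throughout the tests above: raw pixel data is staged in the Emscripten heap with cv._malloc, copied in through a typed-array view, and wrapped by a cv.Mat constructed directly over the pointer. A condensed, self-contained sketch of that pattern (values are illustrative):

    let bytes = new Uint8Array([1, 2, 3, 4]);            // source data
    let ptr = cv._malloc(bytes.length);                  // reserve heap memory
    new Uint8Array(cv.HEAPU8.buffer, ptr, bytes.length).set(bytes);
    let mat = new cv.Mat(2, 2, cv.CV_8UC1, ptr, 0);      // wraps the heap, no copy
    // ... use mat ...
    mat.delete();                                        // release the handle first,
    cv._free(ptr);                                       // then the raw allocation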
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+    // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Core', {});
+
+QUnit.test('test_mat_creation', function(assert) {
+ // Mat constructors.
+ // Mat::Mat(int rows, int cols, int type)
+ {
+ let mat = new cv.Mat(10, 20, cv.CV_8UC3);
+
+ assert.equal(mat.type(), cv.CV_8UC3);
+ assert.equal(mat.depth(), cv.CV_8U);
+ assert.equal(mat.channels(), 3);
+ assert.ok(mat.empty() === false);
+
+ let size = mat.size();
+ assert.equal(size.height, 10);
+ assert.equal(size.width, 20);
+
+ mat.delete();
+ }
+
+ // Mat::Mat(const Mat &)
+ {
+ // Copy from another Mat
+ let mat1 = new cv.Mat(10, 20, cv.CV_8UC3);
+ let mat2 = new cv.Mat(mat1);
+
+ assert.equal(mat2.type(), mat1.type());
+ assert.equal(mat2.depth(), mat1.depth());
+ assert.equal(mat2.channels(), mat1.channels());
+ assert.equal(mat2.empty(), mat1.empty());
+
+        let size1 = mat1.size();
+        let size2 = mat2.size();
+        assert.ok(size1.height === size2.height);
+        assert.ok(size1.width === size2.width);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // Mat::Mat(int rows, int cols, int type, void *data, size_t step=AUTO_STEP)
+ {
+ // 10 * 10 and one channel
+ let data = cv._malloc(10 * 10 * 1);
+ let mat = new cv.Mat(10, 10, cv.CV_8UC1, data, 0);
+
+ assert.equal(mat.type(), cv.CV_8UC1);
+ assert.equal(mat.depth(), cv.CV_8U);
+ assert.equal(mat.channels(), 1);
+ assert.ok(mat.empty() === false);
+
+ let size = mat.size();
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 10);
+
+ mat.delete();
+ }
+
+ // Mat::Mat(int rows, int cols, int type, const Scalar& scalar)
+ {
+ // 2 * 2 8UC4 mat
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4, [0, 1, 2, 3]);
+
+ for (let r = 0; r < mat.rows; r++) {
+ for (let c = 0; c < mat.cols; c++) {
+ let element = mat.ptr(r, c);
+ assert.equal(element[0], 0);
+ assert.equal(element[1], 1);
+ assert.equal(element[2], 2);
+ assert.equal(element[3], 3);
+ }
+ }
+
+ mat.delete();
+ }
+
+ // Mat::create(int, int, int)
+ {
+ let mat = new cv.Mat();
+ mat.create(10, 5, cv.CV_8UC3);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC3);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 3);
+
+ mat.delete();
+ }
+ // Mat::create(Size, int)
+ {
+ let mat = new cv.Mat();
+ mat.create({height: 10, width: 5}, cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+ // clone
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = mat.clone();
+
+        assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ }
+ // copyTo
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+ mat.copyTo(mat2);
+
+        assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ }
+ // copyTo1
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+ let mask = new cv.Mat(5, 5, cv.CV_8UC1, new cv.Scalar(1));
+ mat.copyTo(mat2, mask);
+
+        assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ mask.delete();
+ }
+
+ // matFromArray
+ {
+ let arrayC1 = [0, -1, 2, -3];
+ let arrayC2 = [0, -1, 2, -3, 4, -5, 6, -7];
+ let arrayC3 = [0, -1, 2, -3, 4, -5, 6, -7, 9, -9, 10, -11];
+ let arrayC4 = [0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, 13, 14, 15];
+
+ let mat8UC1 = cv.matFromArray(2, 2, cv.CV_8UC1, arrayC1);
+ let mat8UC2 = cv.matFromArray(2, 2, cv.CV_8UC2, arrayC2);
+ let mat8UC3 = cv.matFromArray(2, 2, cv.CV_8UC3, arrayC3);
+ let mat8UC4 = cv.matFromArray(2, 2, cv.CV_8UC4, arrayC4);
+
+ let mat8SC1 = cv.matFromArray(2, 2, cv.CV_8SC1, arrayC1);
+ let mat8SC2 = cv.matFromArray(2, 2, cv.CV_8SC2, arrayC2);
+ let mat8SC3 = cv.matFromArray(2, 2, cv.CV_8SC3, arrayC3);
+ let mat8SC4 = cv.matFromArray(2, 2, cv.CV_8SC4, arrayC4);
+
+ let mat16UC1 = cv.matFromArray(2, 2, cv.CV_16UC1, arrayC1);
+ let mat16UC2 = cv.matFromArray(2, 2, cv.CV_16UC2, arrayC2);
+ let mat16UC3 = cv.matFromArray(2, 2, cv.CV_16UC3, arrayC3);
+ let mat16UC4 = cv.matFromArray(2, 2, cv.CV_16UC4, arrayC4);
+
+ let mat16SC1 = cv.matFromArray(2, 2, cv.CV_16SC1, arrayC1);
+ let mat16SC2 = cv.matFromArray(2, 2, cv.CV_16SC2, arrayC2);
+ let mat16SC3 = cv.matFromArray(2, 2, cv.CV_16SC3, arrayC3);
+ let mat16SC4 = cv.matFromArray(2, 2, cv.CV_16SC4, arrayC4);
+
+ let mat32SC1 = cv.matFromArray(2, 2, cv.CV_32SC1, arrayC1);
+ let mat32SC2 = cv.matFromArray(2, 2, cv.CV_32SC2, arrayC2);
+ let mat32SC3 = cv.matFromArray(2, 2, cv.CV_32SC3, arrayC3);
+ let mat32SC4 = cv.matFromArray(2, 2, cv.CV_32SC4, arrayC4);
+
+ let mat32FC1 = cv.matFromArray(2, 2, cv.CV_32FC1, arrayC1);
+ let mat32FC2 = cv.matFromArray(2, 2, cv.CV_32FC2, arrayC2);
+ let mat32FC3 = cv.matFromArray(2, 2, cv.CV_32FC3, arrayC3);
+ let mat32FC4 = cv.matFromArray(2, 2, cv.CV_32FC4, arrayC4);
+
+ let mat64FC1 = cv.matFromArray(2, 2, cv.CV_64FC1, arrayC1);
+ let mat64FC2 = cv.matFromArray(2, 2, cv.CV_64FC2, arrayC2);
+ let mat64FC3 = cv.matFromArray(2, 2, cv.CV_64FC3, arrayC3);
+ let mat64FC4 = cv.matFromArray(2, 2, cv.CV_64FC4, arrayC4);
+
+ assert.deepEqual(mat8UC1.data, new Uint8Array(arrayC1));
+ assert.deepEqual(mat8UC2.data, new Uint8Array(arrayC2));
+ assert.deepEqual(mat8UC3.data, new Uint8Array(arrayC3));
+ assert.deepEqual(mat8UC4.data, new Uint8Array(arrayC4));
+
+ assert.deepEqual(mat8SC1.data8S, new Int8Array(arrayC1));
+ assert.deepEqual(mat8SC2.data8S, new Int8Array(arrayC2));
+ assert.deepEqual(mat8SC3.data8S, new Int8Array(arrayC3));
+ assert.deepEqual(mat8SC4.data8S, new Int8Array(arrayC4));
+
+ assert.deepEqual(mat16UC1.data16U, new Uint16Array(arrayC1));
+ assert.deepEqual(mat16UC2.data16U, new Uint16Array(arrayC2));
+ assert.deepEqual(mat16UC3.data16U, new Uint16Array(arrayC3));
+ assert.deepEqual(mat16UC4.data16U, new Uint16Array(arrayC4));
+
+ assert.deepEqual(mat16SC1.data16S, new Int16Array(arrayC1));
+ assert.deepEqual(mat16SC2.data16S, new Int16Array(arrayC2));
+ assert.deepEqual(mat16SC3.data16S, new Int16Array(arrayC3));
+ assert.deepEqual(mat16SC4.data16S, new Int16Array(arrayC4));
+
+ assert.deepEqual(mat32SC1.data32S, new Int32Array(arrayC1));
+ assert.deepEqual(mat32SC2.data32S, new Int32Array(arrayC2));
+ assert.deepEqual(mat32SC3.data32S, new Int32Array(arrayC3));
+ assert.deepEqual(mat32SC4.data32S, new Int32Array(arrayC4));
+
+ assert.deepEqual(mat32FC1.data32F, new Float32Array(arrayC1));
+ assert.deepEqual(mat32FC2.data32F, new Float32Array(arrayC2));
+ assert.deepEqual(mat32FC3.data32F, new Float32Array(arrayC3));
+ assert.deepEqual(mat32FC4.data32F, new Float32Array(arrayC4));
+
+ assert.deepEqual(mat64FC1.data64F, new Float64Array(arrayC1));
+ assert.deepEqual(mat64FC2.data64F, new Float64Array(arrayC2));
+ assert.deepEqual(mat64FC3.data64F, new Float64Array(arrayC3));
+ assert.deepEqual(mat64FC4.data64F, new Float64Array(arrayC4));
+
+ mat8UC1.delete();
+ mat8UC2.delete();
+ mat8UC3.delete();
+ mat8UC4.delete();
+ mat8SC1.delete();
+ mat8SC2.delete();
+ mat8SC3.delete();
+ mat8SC4.delete();
+ mat16UC1.delete();
+ mat16UC2.delete();
+ mat16UC3.delete();
+ mat16UC4.delete();
+ mat16SC1.delete();
+ mat16SC2.delete();
+ mat16SC3.delete();
+ mat16SC4.delete();
+ mat32SC1.delete();
+ mat32SC2.delete();
+ mat32SC3.delete();
+ mat32SC4.delete();
+ mat32FC1.delete();
+ mat32FC2.delete();
+ mat32FC3.delete();
+ mat32FC4.delete();
+ mat64FC1.delete();
+ mat64FC2.delete();
+ mat64FC3.delete();
+ mat64FC4.delete();
+ }
+
+ // matFromImageData
+    // Only test in the browser; use a guard instead of an early return so
+    // the remaining blocks below still run under Node.js.
+    if (typeof window !== 'undefined') {
+ let canvas = window.document.createElement('canvas');
+ canvas.width = 2;
+ canvas.height = 2;
+ let ctx = canvas.getContext('2d');
+        ctx.fillStyle = '#FF0000';
+ ctx.fillRect(0, 0, 1, 1);
+ ctx.fillRect(1, 1, 1, 1);
+
+ let imageData = ctx.getImageData(0, 0, 2, 2);
+ let mat = cv.matFromImageData(imageData);
+
+ assert.deepEqual(mat.data, new Uint8Array(imageData.data));
+
+ mat.delete();
+ }
+
+ // Mat(mat)
+ {
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4, new cv.Scalar(1, 0, 1, 0));
+ let mat1 = new cv.Mat(mat);
+ let mat2 = mat;
+
+ assert.equal(mat.rows, mat1.rows);
+ assert.equal(mat.cols, mat1.cols);
+ assert.equal(mat.type(), mat1.type());
+ assert.deepEqual(mat.data, mat1.data);
+
+ mat.delete();
+
+ assert.equal(mat1.isDeleted(), false);
+ assert.equal(mat2.isDeleted(), true);
+
+ mat1.delete();
+ }
+
+ // mat.setTo
+ {
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4);
+ let s = [0, 1, 2, 3];
+
+ mat.setTo(s);
+
+ assert.deepEqual(mat.ptr(0, 0), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(0, 1), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 0), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 1), new Uint8Array(s));
+
+ let s1 = [0, 0, 0, 0];
+ mat.setTo(s1);
+ let mask = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 0, 1]);
+ mat.setTo(s, mask);
+
+ assert.deepEqual(mat.ptr(0, 0), new Uint8Array(s1));
+ assert.deepEqual(mat.ptr(0, 1), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 0), new Uint8Array(s1));
+ assert.deepEqual(mat.ptr(1, 1), new Uint8Array(s));
+
+ mat.delete();
+ mask.delete();
+ }
+});
+
+QUnit.test('test_mat_ptr', function(assert) {
+ const RValue = 3;
+ const GValue = 7;
+ const BValue = 197;
+
+ // cv.CV_8UC1 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 10;
+ view[2 * step + 1] = RValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2);
+
+ assert.equal(view[1], RValue);
+
+ mat.delete();
+ }
+
+ // cv.CV_8UC3 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC3);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 3 * 10;
+ view[2 * step + 3] = RValue;
+ view[2 * step + 3 + 1] = GValue;
+ view[2 * step + 3 + 2] = BValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2);
+
+ assert.equal(view[3], RValue);
+ assert.equal(view[3 + 1], GValue);
+ assert.equal(view[3 + 2], BValue);
+
+ mat.delete();
+ }
+
+ // cv.CV_8UC3 + Mat::ptr(int, int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC3);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 3 * 10;
+ view[2 * step + 3] = RValue;
+ view[2 * step + 3 + 1] = GValue;
+ view[2 * step + 3 + 2] = BValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2, 1);
+
+ assert.equal(view[0], RValue);
+ assert.equal(view[1], GValue);
+ assert.equal(view[2], BValue);
+
+ mat.delete();
+ }
+
+ const RValueF32 = 3.3;
+ const GValueF32 = 7.3;
+ const BValueF32 = 197.3;
+ const EPSILON = 0.001;
+
+ // cv.CV_32FC1 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC1);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = 10;
+ view[2 * step + 1] = RValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2);
+
+ assert.ok(Math.abs(view[1] - RValueF32) < EPSILON);
+
+ mat.delete();
+ }
+
+ // cv.CV_32FC3 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC3);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = mat.step1(0);
+ view[2 * step + 3] = RValueF32;
+ view[2 * step + 3 + 1] = GValueF32;
+ view[2 * step + 3 + 2] = BValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2);
+
+ assert.ok(Math.abs(view[3] - RValueF32) < EPSILON);
+ assert.ok(Math.abs(view[3 + 1] - GValueF32) < EPSILON);
+ assert.ok(Math.abs(view[3 + 2] - BValueF32) < EPSILON);
+
+ mat.delete();
+ }
+
+ // cv.CV_32FC3 + Mat::ptr(int, int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC3);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = mat.step1(0);
+ view[2 * step + 3] = RValueF32;
+ view[2 * step + 3 + 1] = GValueF32;
+ view[2 * step + 3 + 2] = BValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2, 1);
+
+ assert.ok(Math.abs(view[0] - RValueF32) < EPSILON);
+ assert.ok(Math.abs(view[1] - GValueF32) < EPSILON);
+ assert.ok(Math.abs(view[2] - BValueF32) < EPSILON);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_zeros', function(assert) {
+ let zeros = new Uint8Array(10*10).fill(0);
+ // Mat::zeros(int, int, int)
+ {
+ let mat = cv.Mat.zeros(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, zeros);
+
+ mat.delete();
+ }
+
+ // Mat::zeros(Size, int)
+ {
+ let mat = cv.Mat.zeros({height: 10, width: 10}, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, zeros);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_ones', function(assert) {
+ let ones = new Uint8Array(10*10).fill(1);
+ // Mat::ones(int, int, int)
+ {
+ let mat = cv.Mat.ones(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, ones);
+        mat.delete();
+    }
+ // Mat::ones(Size, int)
+ {
+ let mat = cv.Mat.ones({height: 10, width: 10}, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, ones);
+        mat.delete();
+    }
+});
+
+QUnit.test('test_mat_eye', function(assert) {
+ let eye4by4 = new Uint8Array([1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 0, 0, 1]);
+ // Mat::eye(int, int, int)
+ {
+ let mat = cv.Mat.eye(4, 4, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, eye4by4);
+        mat.delete();
+    }
+
+ // Mat::eye(Size, int)
+ {
+ let mat = cv.Mat.eye({height: 4, width: 4}, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, eye4by4);
+        mat.delete();
+    }
+});
+
+QUnit.test('test_mat_miscs', function(assert) {
+ // Mat::col(int)
+ {
+ let mat = cv.matFromArray(2, 2, cv.CV_8UC2, [1, 2, 3, 4, 5, 6, 7, 8]);
+ let col = mat.col(1);
+
+ assert.equal(col.isContinuous(), false);
+ assert.equal(col.ptr(0, 0)[0], 3);
+ assert.equal(col.ptr(0, 0)[1], 4);
+ assert.equal(col.ptr(1, 0)[0], 7);
+ assert.equal(col.ptr(1, 0)[1], 8);
+
+ col.delete();
+ mat.delete();
+ }
+
+ // Mat::row(int)
+ {
+ let mat = cv.Mat.zeros(5, 5, cv.CV_8UC2);
+ let row = mat.row(1);
+ let view = row.data;
+ assert.equal(view[0], 0);
+ assert.equal(view[4], 0);
+
+ row.delete();
+ mat.delete();
+ }
+
+ // Mat::convertTo(Mat, int, double, double)
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let grayMat = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+
+ mat.convertTo(grayMat, cv.CV_8U, 2, 1);
+ // dest = 2 * source(x, y) + 1.
+ let view = grayMat.data;
+ assert.equal(view[0], (1 * 2) + 1);
+
+ mat.convertTo(grayMat, cv.CV_8U);
+ // dest = 1 * source(x, y) + 0.
+ assert.equal(view[0], 1);
+
+ mat.convertTo(grayMat, cv.CV_8U, 2);
+ // dest = 2 * source(x, y) + 0.
+ assert.equal(view[0], 2);
+
+ grayMat.delete();
+ mat.delete();
+ }
+
+ // split
+ {
+        const R = 7;
+        const G = 13;
+        const B = 29;
+
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let view = mat.data;
+ view[0] = R;
+ view[1] = G;
+ view[2] = B;
+
+ let bgrPlanes = new cv.MatVector();
+ cv.split(mat, bgrPlanes);
+ assert.equal(bgrPlanes.size(), 3);
+
+ let rMat = bgrPlanes.get(0);
+ view = rMat.data;
+ assert.equal(view[0], R);
+
+ let gMat = bgrPlanes.get(1);
+ view = gMat.data;
+ assert.equal(view[0], G);
+
+ let bMat = bgrPlanes.get(2);
+ view = bMat.data;
+ assert.equal(view[0], B);
+
+ mat.delete();
+ rMat.delete();
+ gMat.delete();
+ bgrPlanes.delete();
+ bMat.delete();
+ }
+
+ // elemSize
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ assert.equal(mat.elemSize(), 3);
+ assert.equal(mat.elemSize1(), 1);
+
+ let mat2 = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+ assert.equal(mat2.elemSize(), 1);
+ assert.equal(mat2.elemSize1(), 1);
+
+ let mat3 = cv.Mat.eye(5, 5, cv.CV_16UC3);
+ assert.equal(mat3.elemSize(), 2 * 3);
+ assert.equal(mat3.elemSize1(), 2);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+
+ // step
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ assert.equal(mat.step[0], 15);
+ assert.equal(mat.step[1], 3);
+
+ let mat2 = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+ assert.equal(mat2.step[0], 5);
+ assert.equal(mat2.step[1], 1);
+
+ let mat3 = cv.Mat.eye(5, 5, cv.CV_16UC3);
+ assert.equal(mat3.step[0], 30);
+ assert.equal(mat3.step[1], 6);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+
+ // dot
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = cv.Mat.eye(5, 5, cv.CV_8UC1);
+
+ assert.equal(mat.dot(mat), 25);
+ assert.equal(mat.dot(mat2), 5);
+ assert.equal(mat2.dot(mat2), 5);
+
+ mat.delete();
+ mat2.delete();
+ }
+
+ // mul
+ {
+ const FACTOR = 5;
+ let mat = cv.Mat.ones(4, 4, cv.CV_8UC1);
+ let mat2 = cv.Mat.eye(4, 4, cv.CV_8UC1);
+
+ let expected = new Uint8Array([FACTOR, 0, 0, 0,
+ 0, FACTOR, 0, 0,
+ 0, 0, FACTOR, 0,
+ 0, 0, 0, FACTOR]);
+ let mat3 = mat.mul(mat2, FACTOR);
+
+ assert.deepEqual(mat3.data, expected);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+});
+
+
+QUnit.test('test mat access', function(assert) {
+ // test memory view
+ {
+ let data = new Uint8Array([0, 0, 0, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(8);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint8Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_8UC1, dataPtr, 0);
+
+
+ let unsignedCharView = new Uint8Array(data.buffer);
+ let charView = new Int8Array(data.buffer);
+ let shortView = new Int16Array(data.buffer);
+ let unsignedShortView = new Uint16Array(data.buffer);
+ let intView = new Int32Array(data.buffer);
+ let float32View = new Float32Array(data.buffer);
+ let float64View = new Float64Array(data.buffer);
+
+
+ assert.deepEqual(unsignedCharView, mat.data);
+ assert.deepEqual(charView, mat.data8S);
+ assert.deepEqual(shortView, mat.data16S);
+ assert.deepEqual(unsignedShortView, mat.data16U);
+ assert.deepEqual(intView, mat.data32S);
+ assert.deepEqual(float32View, mat.data32F);
+ assert.deepEqual(float64View, mat.data64F);
+        mat.delete();
+        cv._free(dataPtr);
+    }
+
+ // test ucharAt(i)
+ {
+ let data = new Uint8Array([0, 0, 0, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(8);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint8Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_8UC1, dataPtr, 0);
+
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 0);
+ assert.equal(mat.ucharAt(2), 0);
+ assert.equal(mat.ucharAt(3), 255);
+ assert.equal(mat.ucharAt(4), 0);
+ assert.equal(mat.ucharAt(5), 1);
+ assert.equal(mat.ucharAt(6), 2);
+ assert.equal(mat.ucharAt(7), 3);
+        mat.delete();
+        cv._free(dataPtr);
+    }
+
+ // test ushortAt(i)
+ {
+ let data = new Uint16Array([0, 1000, 65000, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(16);
+
+ let dataHeap = new Uint16Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint16Array(data.buffer));
+
+        let mat = new cv.Mat(8, 1, cv.CV_16UC1, dataPtr, 0); // unsigned type matches ushortAt and the 65000 sample
+
+ assert.equal(mat.ushortAt(0), 0);
+ assert.equal(mat.ushortAt(1), 1000);
+ assert.equal(mat.ushortAt(2), 65000);
+ assert.equal(mat.ushortAt(3), 255);
+ assert.equal(mat.ushortAt(4), 0);
+ assert.equal(mat.ushortAt(5), 1);
+ assert.equal(mat.ushortAt(6), 2);
+ assert.equal(mat.ushortAt(7), 3);
+        mat.delete();
+        cv._free(dataPtr);
+    }
+
+ // test intAt(i)
+ {
+ let data = new Int32Array([0, -1000, 65000, 255, -2000000, -1, 2, 3]);
+ let dataPtr = cv._malloc(32);
+
+        let dataHeap = new Int32Array(cv.HEAP32.buffer, dataPtr, 8);
+ dataHeap.set(new Int32Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_32SC1, dataPtr, 0);
+
+ assert.equal(mat.intAt(0), 0);
+ assert.equal(mat.intAt(1), -1000);
+ assert.equal(mat.intAt(2), 65000);
+ assert.equal(mat.intAt(3), 255);
+ assert.equal(mat.intAt(4), -2000000);
+ assert.equal(mat.intAt(5), -1);
+ assert.equal(mat.intAt(6), 2);
+ assert.equal(mat.intAt(7), 3);
+        mat.delete();
+        cv._free(dataPtr);
+    }
+
+ // test floatAt(i)
+ {
+ const EPSILON = 0.001;
+ let data = new Float32Array([0, -10.5, 650.001, 255, -20.1, -1.2, 2, 3.5]);
+ let dataPtr = cv._malloc(32);
+
+        let dataHeap = new Float32Array(cv.HEAPF32.buffer, dataPtr, 8);
+ dataHeap.set(new Float32Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_32FC1, dataPtr, 0);
+
+ assert.equal(Math.abs(mat.floatAt(0)-0) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(1)+10.5) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(2)-650.001) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(3)-255) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(4)+20.1) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(5)+1.2) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(6)-2) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(7)-3.5) < EPSILON, true);
+        mat.delete();
+        cv._free(dataPtr);
+    }
+
+ // test intAt(i,j)
+ {
+ let mat = cv.Mat.eye({height: 3, width: 3}, cv.CV_32SC1);
+
+ assert.equal(mat.intAt(0, 0), 1);
+ assert.equal(mat.intAt(0, 1), 0);
+ assert.equal(mat.intAt(0, 2), 0);
+ assert.equal(mat.intAt(1, 0), 0);
+ assert.equal(mat.intAt(1, 1), 1);
+ assert.equal(mat.intAt(1, 2), 0);
+ assert.equal(mat.intAt(2, 0), 0);
+ assert.equal(mat.intAt(2, 1), 0);
+ assert.equal(mat.intAt(2, 2), 1);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_operations', function(assert) {
+ // test minMaxLoc
+ {
+ let src = cv.Mat.ones(4, 4, cv.CV_8UC1);
+
+ src.data[2] = 0;
+ src.data[5] = 2;
+
+ let result = cv.minMaxLoc(src);
+
+ assert.equal(result.minVal, 0);
+ assert.equal(result.maxVal, 2);
+ assert.deepEqual(result.minLoc, {x: 2, y: 0});
+ assert.deepEqual(result.maxLoc, {x: 1, y: 1});
+
+ src.delete();
+ }
+});
+
+QUnit.test('test_mat_roi', function(assert) {
+    // test roi
+ {
+ let mat = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 2, 3]);
+ let roi = mat.roi(new cv.Rect(1, 1, 1, 1));
+
+ assert.equal(roi.rows, 1);
+ assert.equal(roi.cols, 1);
+ assert.deepEqual(roi.data, new Uint8Array([mat.ucharAt(1, 1)]));
+
+ mat.delete();
+ roi.delete();
+ }
+});
+
+
+QUnit.test('test_mat_range', function(assert) {
+ {
+ let src = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 2, 3]);
+ let mat = src.colRange(0, 1);
+
+ assert.equal(mat.isContinuous(), false);
+ assert.equal(mat.rows, 2);
+ assert.equal(mat.cols, 1);
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 2);
+
+ mat.delete();
+
+ mat = src.colRange({start: 0, end: 1});
+
+ assert.equal(mat.isContinuous(), false);
+ assert.equal(mat.rows, 2);
+ assert.equal(mat.cols, 1);
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 2);
+
+ mat.delete();
+
+ mat = src.rowRange(1, 2);
+
+ assert.equal(mat.rows, 1);
+ assert.equal(mat.cols, 2);
+ assert.deepEqual(mat.data, new Uint8Array([2, 3]));
+
+ mat.delete();
+
+ mat = src.rowRange({start: 1, end: 2});
+
+ assert.equal(mat.rows, 1);
+ assert.equal(mat.cols, 2);
+ assert.deepEqual(mat.data, new Uint8Array([2, 3]));
+
+ mat.delete();
+
+ src.delete();
+ }
+});
+
+QUnit.test('test_mat_diag', function(assert) {
+ // test diag
+ {
+ let mat = cv.matFromArray(3, 3, cv.CV_8UC1, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let d = mat.diag();
+ let d1 = mat.diag(1);
+ let d2 = mat.diag(-1);
+
+ assert.equal(mat.isContinuous(), true);
+ assert.equal(d.isContinuous(), false);
+ assert.equal(d1.isContinuous(), false);
+ assert.equal(d2.isContinuous(), false);
+
+ assert.equal(d.ucharAt(0), 0);
+ assert.equal(d.ucharAt(1), 4);
+ assert.equal(d.ucharAt(2), 8);
+
+ assert.equal(d1.ucharAt(0), 1);
+ assert.equal(d1.ucharAt(1), 5);
+
+ assert.equal(d2.ucharAt(0), 3);
+ assert.equal(d2.ucharAt(1), 7);
+
+ mat.delete();
+ d.delete();
+ d1.delete();
+ d2.delete();
+ }
+});
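One convention worth calling out before the next file: embind handles are not garbage collected, so every cv.Mat, MatVector, and similar object created in these tests is released with an explicit delete() call. A sketch of the safe shape for a new test block:

    let m = new cv.Mat(2, 2, cv.CV_8UC1);
    try {
        // ... assertions against m ...
    } finally {
        m.delete();   // without this the WASM heap leaks across tests
    }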
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+ // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
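+ // map the cascade file into the Emscripten virtual FS so CascadeClassifier.load() can find it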
+ cv.FS_createLazyFile('/', 'haarcascade_frontalface_default.xml', // eslint-disable-line new-cap
+ 'haarcascade_frontalface_default.xml', true, false);
+}
+
+QUnit.module('Object Detection', {});
+QUnit.test('Cascade classification', function(assert) {
+ // Group rectangle
+ {
+ let rectList = new cv.RectVector();
+ let weights = new cv.IntVector();
+ let groupThreshold = 1;
+ const eps = 0.2;
+
+ let rect1 = new cv.Rect(1, 2, 3, 4);
+ let rect2 = new cv.Rect(1, 4, 2, 3);
+
+ rectList.push_back(rect1);
+ rectList.push_back(rect2);
+
+ cv.groupRectangles(rectList, weights, groupThreshold, eps);
+
+ rectList.delete();
+ weights.delete();
+ }
+
+ // CascadeClassifier
+ {
+ let classifier = new cv.CascadeClassifier();
+ const modelPath = '/haarcascade_frontalface_default.xml';
+
+ assert.equal(classifier.empty(), true);
+
+ classifier.load(modelPath);
+ assert.equal(classifier.empty(), false);
+
+ let image = cv.Mat.eye({height: 10, width: 10}, cv.CV_8UC3);
+ let objects = new cv.RectVector();
+ let numDetections = new cv.IntVector();
+ const scaleFactor = 1.1;
+ const minNeighbors = 3;
+ const flags = 0;
+ const minSize = {height: 0, width: 0};
+ const maxSize = {height: 10, width: 10};
+
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags, minSize, maxSize);
+
+ // test default parameters
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags, minSize);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor);
+
+ classifier.delete();
+ image.delete();
+ objects.delete();
+ numDetections.delete();
+ }
+
+ // HOGDescriptor
+ {
+ let hog = new cv.HOGDescriptor();
+ let mat = new cv.Mat({height: 10, width: 10}, cv.CV_8UC1);
+ let descriptors = new cv.FloatVector();
+ let locations = new cv.PointVector();
+
+ assert.equal(hog.winSize.height, 128);
+ assert.equal(hog.winSize.width, 64);
+ assert.equal(hog.nbins, 9);
+ assert.equal(hog.derivAperture, 1);
+ assert.equal(hog.winSigma, -1);
+ assert.equal(hog.histogramNormType, 0);
+ assert.equal(hog.nlevels, 64);
+
+ hog.nlevels = 32;
+ assert.equal(hog.nlevels, 32);
+
+ hog.delete();
+ mat.delete();
+ descriptors.delete();
+ locations.delete();
+ }
+});
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+ // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+QUnit.module('Utils', {});
+QUnit.test('Test vectors', function(assert) {
+ {
+ let pointVector = new cv.PointVector();
+ for (let i=0; i<100; ++i) {
+ pointVector.push_back({x: i, y: 2*i});
+ }
+
+ assert.equal(pointVector.size(), 100);
+
+ let index = 10;
+ let item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 0;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 99;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ pointVector.delete();
+ }
+
+ {
+ let pointVector = new cv.PointVector();
+ for (let i=0; i<100; ++i) {
+ pointVector.push_back(new cv.Point(i, 2*i));
+ }
+
+ pointVector.push_back(new cv.Point());
+
+ assert.equal(pointVector.size(), 101);
+
+ let index = 10;
+ let item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 0;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 99;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 100;
+ item = pointVector.get(index);
+ assert.equal(item.x, 0);
+ assert.equal(item.y, 0);
+
+ pointVector.delete();
+ }
+});
+QUnit.test('Test Rect', function(assert) {
+ let rectVector = new cv.RectVector();
+ let rect = {x: 1, y: 2, width: 3, height: 4};
+ rectVector.push_back(rect);
+ rectVector.push_back(new cv.Rect());
+ rectVector.push_back(new cv.Rect(rect));
+ rectVector.push_back(new cv.Rect({x: 5, y: 6}, {width: 7, height: 8}));
+ rectVector.push_back(new cv.Rect(9, 10, 11, 12));
+
+ assert.equal(rectVector.size(), 5);
+
+ let item = rectVector.get(0);
+ assert.equal(item.x, 1);
+ assert.equal(item.y, 2);
+ assert.equal(item.width, 3);
+ assert.equal(item.height, 4);
+
+ item = rectVector.get(1);
+ assert.equal(item.x, 0);
+ assert.equal(item.y, 0);
+ assert.equal(item.width, 0);
+ assert.equal(item.height, 0);
+
+ item = rectVector.get(2);
+ assert.equal(item.x, 1);
+ assert.equal(item.y, 2);
+ assert.equal(item.width, 3);
+ assert.equal(item.height, 4);
+
+ item = rectVector.get(3);
+ assert.equal(item.x, 5);
+ assert.equal(item.y, 6);
+ assert.equal(item.width, 7);
+ assert.equal(item.height, 8);
+
+ item = rectVector.get(4);
+ assert.equal(item.x, 9);
+ assert.equal(item.y, 10);
+ assert.equal(item.width, 11);
+ assert.equal(item.height, 12);
+
+ rectVector.delete();
+});
+QUnit.test('Test Size', function(assert) {
+ {
+ let mat = new cv.Mat();
+ mat.create({width: 5, height: 10}, cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+
+ {
+ let mat = new cv.Mat();
+ mat.create(new cv.Size(5, 10), cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_rotated_rect', function(assert) {
+ {
+ let rect = {center: {x: 100, y: 100}, size: {height: 100, width: 50}, angle: 30};
+
+ assert.equal(rect.center.x, 100);
+ assert.equal(rect.center.y, 100);
+ assert.equal(rect.angle, 30);
+ assert.equal(rect.size.height, 100);
+ assert.equal(rect.size.width, 50);
+ }
+
+ {
+ let rect = new cv.RotatedRect();
+
+ assert.equal(rect.center.x, 0);
+ assert.equal(rect.center.y, 0);
+ assert.equal(rect.angle, 0);
+ assert.equal(rect.size.height, 0);
+ assert.equal(rect.size.width, 0);
+
+ let points = cv.RotatedRect.points(rect);
+
+ assert.equal(points[0].x, 0);
+ assert.equal(points[0].y, 0);
+ assert.equal(points[1].x, 0);
+ assert.equal(points[1].y, 0);
+ assert.equal(points[2].x, 0);
+ assert.equal(points[2].y, 0);
+ assert.equal(points[3].x, 0);
+ assert.equal(points[3].y, 0);
+ }
+
+ {
+ let rect = new cv.RotatedRect({x: 100, y: 100}, {height: 100, width: 50}, 30);
+
+ assert.equal(rect.center.x, 100);
+ assert.equal(rect.center.y, 100);
+ assert.equal(rect.angle, 30);
+ assert.equal(rect.size.height, 100);
+ assert.equal(rect.size.width, 50);
+
+ let points = cv.RotatedRect.points(rect);
+
+ assert.equal(points[0].x, cv.RotatedRect.boundingRect2f(rect).x);
+ assert.equal(points[1].y, cv.RotatedRect.boundingRect2f(rect).y);
+ }
+});
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+ // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Video', {});
+QUnit.test('Background Segmentation', function(assert) {
+ // BackgroundSubtractorMOG2
+ {
+ const history = 600;
+ const varThreshold = 15;
+ const detectShadows = true;
+
+ let mog2 = new cv.BackgroundSubtractorMOG2(history, varThreshold, detectShadows);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2();
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2(history);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2(history, varThreshold);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+ }
+});
--- /dev/null
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <title>OpenCV JS Tests</title>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
+ <link rel="stylesheet" href="http://code.jquery.com/qunit/qunit-1.20.0.css" type="text/css" media="screen">
+ <style>
+ body {
+ font-family: Monospace;
+ background-color: #ffffff;
+ margin: 0px;
+ }
+ a {
+ color: #0040ff;
+ }
+ </style>
+ </head>
+ <body>
+
+ <div id="qunit"></div>
+ <div id="qunit-fixture"></div>
+
+ <script src="http://code.jquery.com/qunit/qunit-2.0.1.js"></script>
+ <script type="application/javascript" async src="opencv.js"></script>
+ <script type="application/javascript" src="test_mat.js"></script>
+ <script type="application/javascript" src="test_utils.js"></script>
+ <script type="application/javascript" src="test_imgproc.js"></script>
+ <script type="application/javascript" src="test_objdetect.js"></script>
+ <script type="application/javascript" src="test_video.js"></script>
+ <script type='text/javascript'>
+ QUnit.config.autostart = false;
+
+ var Module = {
+ preRun: [function() {
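+ // preload the cascade file into the Emscripten FS before the runtime starts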
+ Module.FS_createPreloadedFile('/', 'haarcascade_frontalface_default.xml', 'haarcascade_frontalface_default.xml', true, false);
+ }],
+ postRun: [],
+ onRuntimeInitialized: function() {
+ console.log("Runtime is ready...");
+ QUnit.start();
+ },
+ print: (function() {
+ var element = document.getElementById('output');
+ if (element) element.value = ''; // clear the output area on reload
+ return function(text) {
+ console.log(text);
+ if (element) {
+ element.value += text + "\n";
+ element.scrollTop = element.scrollHeight; // focus on bottom
+ }
+ };
+ })(),
+ printErr: function(text) {
+ console.log(text);
+ },
+ setStatus: function(text) {
+ console.log(text);
+ },
+ totalDependencies: 0
+ };
+
+ Module.setStatus('Downloading...');
+ window.onerror = function(event) {
+ Module.setStatus('Exception thrown, see JavaScript console');
+ Module.setStatus = function(text) {
+ if (text) Module.printErr('[post-exception status] ' + text);
+ };
+ };
+ </script>
+
+<!--
+ TODO
+ <script type="application/javascript" src="test_features2d.js"></script>
+-->
+
+ </body>
+</html>
--- /dev/null
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+let testrunner = require('qunit');
+testrunner.options.maxBlockDuration = 20000; // opencv.js needs time to load
+
+testrunner.run(
+ {
+ code: 'opencv.js',
+ tests: ['test_mat.js', 'test_utils.js', 'test_imgproc.js',
+ 'test_objdetect.js', 'test_video.js'],
+ },
+ function(err, report) {
+ console.log(report.failed + ' failed, ' + report.passed + ' passed');
+ }
+);
set(the_description "Object Detection")
-ocv_define_module(objdetect opencv_core opencv_imgproc WRAP java python)
+ocv_define_module(objdetect opencv_core opencv_imgproc WRAP java python js)
modlist.append("/CA " + macro_arg)
arg_str = arg_str[:npos] + arg_str[npos3+1:]
+ npos = arg_str.find("const")
+ if npos >= 0:
+ modlist.append("/C")
+
+ npos = arg_str.find("&")
+ if npos >= 0:
+ modlist.append("/Ref")
+
arg_str = arg_str.strip()
word_start = 0
word_list = []
func_modlist.append("="+arg)
decl_str = decl_str[:npos] + decl_str[npos3+1:]
+ virtual_method = False
+ pure_virtual_method = False
+ const_method = False
+
# filter off some common prefixes, which are meaningless for Python wrappers.
# note that we do not strip "static" prefix, which does matter;
# it means class methods, not instance methods
- decl_str = self.batch_replace(decl_str, [("virtual", ""), ("static inline", ""), ("inline", ""),\
+ decl_str = self.batch_replace(decl_str, [("static inline", ""), ("inline", ""),\
("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", ""),
("CV_DEPRECATED", "")]).strip()
+
+ if decl_str.strip().startswith('virtual'):
+ virtual_method = True
+
+ decl_str = decl_str.replace('virtual', '')
+
+ end_tokens = decl_str[decl_str.rfind(')'):].split()
+ const_method = 'const' in end_tokens
+ pure_virtual_method = '=' in end_tokens and '0' in end_tokens
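+ # e.g. "void detect(...) const = 0" yields end_tokens [')', 'const', '=', '0'],
+ # marking the method as both const and pure virtual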
+
static_method = False
context = top[0]
if decl_str.startswith("static") and (context == "class" or context == "struct"):
if static_method:
func_modlist.append("/S")
+ if const_method:
+ func_modlist.append("/C")
+ if virtual_method:
+ func_modlist.append("/V")
+ if pure_virtual_method:
+ func_modlist.append("/PV")
return [funcname, rettype, func_modlist, args, original_type, docstring]
set(the_description "Video Analysis")
-ocv_define_module(video opencv_imgproc WRAP java python)
+ocv_define_module(video opencv_imgproc WRAP java python js)
--- /dev/null
+Building OpenCV.js with Emscripten
+====================
+
+[Download and install Emscripten](https://kripken.github.io/emscripten-site/docs/getting_started/downloads.html).
+
+Execute the `build_js.py` script:
+```
+python <opencv_src_dir>/platforms/js/build_js.py <build_dir>
+```
+
+If everything goes well, after a few minutes you will get `<build_dir>/bin/opencv.js`, which you can include in your web pages.
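+
+As a minimal sketch of using it in a page (this mirrors the `Module` pattern from the bundled `tests.html`; it assumes the page loads `opencv.js` via a `<script>` tag and that the global `cv` namespace becomes usable once the Emscripten runtime initializes):
+```
+// Define Module before loading the library, e.g. via
+// <script async src="opencv.js"></script>
+var Module = {
+  onRuntimeInitialized: function() {
+    // cv is ready to use from this point on
+    let mat = new cv.Mat({width: 3, height: 3}, cv.CV_8UC1);
+    console.log('channels: ' + mat.channels());
+    mat.delete(); // wrapped objects must be freed explicitly
+  }
+};
+```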
+
+Run the script with the `-h` switch to see all build options.
+
+For a detailed build tutorial, see `<opencv_src_dir>/doc/js_tutorials/js_setup/js_setup/js_setup.markdown`.
--- /dev/null
+#!/usr/bin/env python
+
+import os, sys, subprocess, argparse, shutil, glob, re, multiprocessing
+import logging as log
+
+class Fail(Exception):
+ def __init__(self, text=None):
+ self.t = text
+ def __str__(self):
+ return "ERROR" if self.t is None else self.t
+
+def execute(cmd, shell=False):
+ try:
+ log.info("Executing: %s" % cmd)
+ retcode = subprocess.call(cmd, shell=shell)
+ if retcode < 0:
+ raise Fail("Child was terminated by signal:" %s -retcode)
+ elif retcode > 0:
+ raise Fail("Child returned: %s" % retcode)
+ except OSError as e:
+ raise Fail("Execution failed: %d / %s" % (e.errno, e.strerror))
+
+def rm_one(d):
+ d = os.path.abspath(d)
+ if os.path.exists(d):
+ if os.path.isdir(d):
+ log.info("Removing dir: %s", d)
+ shutil.rmtree(d)
+ elif os.path.isfile(d):
+ log.info("Removing file: %s", d)
+ os.remove(d)
+
+def check_dir(d, create=False, clean=False):
+ d = os.path.abspath(d)
+ log.info("Check dir %s (create: %s, clean: %s)", d, create, clean)
+ if os.path.exists(d):
+ if not os.path.isdir(d):
+ raise Fail("Not a directory: %s" % d)
+ if clean:
+ for x in glob.glob(os.path.join(d, "*")):
+ rm_one(x)
+ else:
+ if create:
+ os.makedirs(d)
+ return d
+
+def check_file(d):
+ return os.path.isfile(os.path.abspath(d))
+
+def find_file(name, path):
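+ # walk the tree under path; returns None if the file is not found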
+ for root, dirs, files in os.walk(path):
+ if name in files:
+ return os.path.join(root, name)
+
+def determine_emcc_version(emscripten_dir):
+ # decode so the version regex also works on the bytes returned under Python 3
+ ret = subprocess.check_output([os.path.join(emscripten_dir, "emcc"), "--version"]).decode('utf-8')
+ m = re.match(r'^emcc.*(\d+\.\d+\.\d+)', ret, flags=re.IGNORECASE)
+ return m.group(1)
+
+def determine_opencv_version(version_hpp_path):
+ # version in 2.4 - CV_VERSION_EPOCH.CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION
+ # version in master - CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION-CV_VERSION_STATUS
+ with open(version_hpp_path, "rt") as f:
+ data = f.read()
+ major = re.search(r'^#define\W+CV_VERSION_MAJOR\W+(\d+)$', data, re.MULTILINE).group(1)
+ minor = re.search(r'^#define\W+CV_VERSION_MINOR\W+(\d+)$', data, re.MULTILINE).group(1)
+ revision = re.search(r'^#define\W+CV_VERSION_REVISION\W+(\d+)$', data, re.MULTILINE).group(1)
+ version_status = re.search(r'^#define\W+CV_VERSION_STATUS\W+"([^"]*)"$', data, re.MULTILINE).group(1)
+ return "%(major)s.%(minor)s.%(revision)s%(version_status)s" % locals()
+
+class Builder:
+ def __init__(self, options):
+ self.options = options
+ self.build_dir = check_dir(options.build_dir, create=True)
+ self.opencv_dir = check_dir(options.opencv_dir)
+ self.emscripten_dir = check_dir(options.emscripten_dir)
+ self.opencv_version = determine_opencv_version(os.path.join(self.opencv_dir, "modules", "core", "include", "opencv2", "core", "version.hpp"))
+ self.emcc_version = determine_emcc_version(self.emscripten_dir)
+
+ def get_toolchain_file(self):
+ return os.path.join(self.emscripten_dir, "cmake", "Modules", "Platform", "Emscripten.cmake")
+
+ def clean_build_dir(self):
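+ # paths are relative: main() chdir()s into the build directory before calling this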
+ for d in ["CMakeCache.txt", "CMakeFiles/", "bin/", "libs/", "lib/", "modules"]:
+ rm_one(d)
+
+ def get_cmake_cmd(self):
+ cmd = ["cmake",
+ "-DCMAKE_BUILD_TYPE=Release",
+ "-DCMAKE_TOOLCHAIN_FILE='%s'" % self.get_toolchain_file(),
+ "-DCPU_BASELINE=''",
+ "-DCPU_DISPATCH=''",
+ "-DCV_TRACE=OFF",
+ "-DBUILD_SHARED_LIBS=OFF",
+ "-DWITH_1394=OFF",
+ "-DWITH_VTK=OFF",
+ "-DWITH_CUDA=OFF",
+ "-DWITH_CUFFT=OFF",
+ "-DWITH_CUBLAS=OFF",
+ "-DWITH_NVCUVID=OFF",
+ "-DWITH_EIGEN=OFF",
+ "-DWITH_FFMPEG=OFF",
+ "-DWITH_GSTREAMER=OFF",
+ "-DWITH_GTK=OFF",
+ "-DWITH_GTK_2_X=OFF",
+ "-DWITH_IPP=OFF",
+ "-DWITH_JASPER=OFF",
+ "-DWITH_JPEG=OFF",
+ "-DWITH_WEBP=OFF",
+ "-DWITH_OPENEXR=OFF",
+ "-DWITH_OPENGL=OFF",
+ "-DWITH_OPENVX=OFF",
+ "-DWITH_OPENNI=OFF",
+ "-DWITH_OPENNI2=OFF",
+ "-DWITH_PNG=OFF",
+ "-DWITH_TBB=OFF",
+ "-DWITH_PTHREADS_PF=OFF",
+ "-DWITH_TIFF=OFF",
+ "-DWITH_V4L=OFF",
+ "-DWITH_OPENCL=OFF",
+ "-DWITH_OPENCL_SVM=OFF",
+ "-DWITH_OPENCLAMDFFT=OFF",
+ "-DWITH_OPENCLAMDBLAS=OFF",
+ "-DWITH_MATLAB=OFF",
+ "-DWITH_GPHOTO2=OFF",
+ "-DWITH_LAPACK=OFF",
+ "-DWITH_ITT=OFF",
+ "-DBUILD_ZLIB=ON",
+ "-DBUILD_opencv_apps=OFF",
+ "-DBUILD_opencv_calib3d=OFF",
+ "-DBUILD_opencv_dnn=OFF",
+ "-DBUILD_opencv_features2d=OFF",
+ "-DBUILD_opencv_flann=OFF",
+ "-DBUILD_opencv_ml=OFF",
+ "-DBUILD_opencv_photo=OFF",
+ "-DBUILD_opencv_imgcodecs=OFF",
+ "-DBUILD_opencv_shape=OFF",
+ "-DBUILD_opencv_videoio=OFF",
+ "-DBUILD_opencv_videostab=OFF",
+ "-DBUILD_opencv_highgui=OFF",
+ "-DBUILD_opencv_superres=OFF",
+ "-DBUILD_opencv_stitching=OFF",
+ "-DBUILD_opencv_java=OFF",
+ "-DBUILD_opencv_js=ON",
+ "-DBUILD_opencv_python2=OFF",
+ "-DBUILD_opencv_python3=OFF",
+ "-DBUILD_EXAMPLES=OFF",
+ "-DBUILD_PACKAGE=OFF",
+ "-DBUILD_TESTS=OFF",
+ "-DBUILD_PERF_TESTS=OFF"]
+ if self.options.build_doc:
+ cmd.append("-DBUILD_DOCS=ON")
+ else:
+ cmd.append("-DBUILD_DOCS=OFF")
+
+ flags = self.get_build_flags()
+ if flags:
+ cmd += ["-DCMAKE_C_FLAGS='%s'" % flags,
+ "-DCMAKE_CXX_FLAGS='%s'" % flags]
+ return cmd
+
+ def get_build_flags(self):
+ flags = ""
+ if self.options.build_wasm:
+ flags += "-s WASM=1 "
+ if self.options.enable_exception:
+ flags += "-s DISABLE_EXCEPTION_CATCHING=0 "
+ return flags
+
+ def config(self):
+ cmd = self.get_cmake_cmd()
+ cmd.append(self.opencv_dir)
+ execute(cmd)
+
+ def build_opencvjs(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv.js"])
+
+ def build_test(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_test"])
+
+ def build_doc(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"])
+
+
+#===================================================================================================
+
+if __name__ == "__main__":
+ opencv_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
+ emscripten_dir = None
+ if "EMSCRIPTEN" in os.environ:
+ emscripten_dir = os.environ["EMSCRIPTEN"]
+
+ parser = argparse.ArgumentParser(description='Build OpenCV.js with Emscripten')
+ parser.add_argument("build_dir", help="Building directory (and output)")
+ parser.add_argument('--opencv_dir', default=opencv_dir, help='OpenCV source directory (default is "../.." relative to script location)')
+ parser.add_argument('--emscripten_dir', default=emscripten_dir, help="Path to Emscripten to use for build")
+ parser.add_argument('--build_wasm', action="store_true", help="Build OpenCV.js in WebAssembly format")
+ parser.add_argument('--build_test', action="store_true", help="Build tests")
+ parser.add_argument('--build_doc', action="store_true", help="Build tutorials")
+ parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir")
+ parser.add_argument('--skip_config', action="store_true", help="Skip cmake config")
+ parser.add_argument('--config_only', action="store_true", help="Only do cmake config")
+ parser.add_argument('--enable_exception', action="store_true", help="Enable exception handling")
+ args = parser.parse_args()
+
+ log.basicConfig(format='%(message)s', level=log.DEBUG)
+ log.debug("Args: %s", args)
+
+ if args.emscripten_dir is None:
+ log.info("Cannot get Emscripten path, please specify it either by EMSCRIPTEN environment variable or --emscripten_dir option.")
+ sys.exit(-1)
+
+ builder = Builder(args)
+
+ log.info("Detected OpenCV version: %s", builder.opencv_version)
+ log.info("Detected emcc version: %s", builder.emcc_version)
+
+ os.chdir(builder.build_dir)
+
+ if args.clean_build_dir:
+ log.info("=====")
+ log.info("===== Clean build dir %s", builder.build_dir)
+ log.info("=====")
+ builder.clean_build_dir()
+
+ if not args.skip_config:
+ target = "asm.js"
+ if args.build_wasm:
+ target = "wasm"
+ log.info("=====")
+ log.info("===== Config OpenCV.js build for %s" % target)
+ log.info("=====")
+ builder.config()
+
+ if args.config_only:
+ sys.exit(0)
+
+ log.info("=====")
+ log.info("===== Building OpenCV.js in %s", "asm.js" if not args.build_wasm else "wasm")
+ log.info("=====")
+ builder.build_opencvjs()
+
+ if args.build_test:
+ log.info("=====")
+ log.info("===== Building OpenCV.js tests")
+ log.info("=====")
+ builder.build_test()
+
+ if args.build_doc:
+ log.info("=====")
+ log.info("===== Building OpenCV.js tutorials")
+ log.info("=====")
+ builder.build_doc()
+
+
+ log.info("=====")
+ log.info("===== Build finished")
+ log.info("=====")
+
+ opencvjs_path = os.path.join(builder.build_dir, "bin", "opencv.js")
+ if check_file(opencvjs_path):
+ log.info("OpenCV.js location: %s", opencvjs_path)
+
+ if args.build_test:
+ opencvjs_test_path = os.path.join(builder.build_dir, "bin", "tests.html")
+ if check_file(opencvjs_test_path):
+ log.info("OpenCV.js tests location: %s", opencvjs_test_path)
+
+ if args.build_doc:
+ opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html"))
+ if opencvjs_tutorial_path and check_file(opencvjs_tutorial_path): # find_file() may return None
+ log.info("OpenCV.js tutorials location: %s", opencvjs_tutorial_path)