#!/usr/bin/env python
import os, sys, subprocess, argparse, shutil, glob, re, multiprocessing
import logging as log
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
class Fail(Exception):
def __init__(self, text=None):
self.t = text
def __str__(self):
return "ERROR" if self.t is None else self.t
def execute(cmd, shell=False):
try:
log.info("Executing: %s" % cmd)
env = os.environ.copy()
env['VERBOSE'] = '1'
retcode = subprocess.call(cmd, shell=shell, env=env)
if retcode < 0:
raise Fail("Child was terminated by signal: %s" % -retcode)
elif retcode > 0:
raise Fail("Child returned: %s" % retcode)
except OSError as e:
raise Fail("Execution failed: %d / %s" % (e.errno, e.strerror))
def rm_one(d):
d = os.path.abspath(d)
if os.path.exists(d):
if os.path.isdir(d):
log.info("Removing dir: %s", d)
shutil.rmtree(d)
elif os.path.isfile(d):
log.info("Removing file: %s", d)
os.remove(d)
def check_dir(d, create=False, clean=False):
d = os.path.abspath(d)
log.info("Check dir %s (create: %s, clean: %s)", d, create, clean)
if os.path.exists(d):
if not os.path.isdir(d):
raise Fail("Not a directory: %s" % d)
if clean:
for x in glob.glob(os.path.join(d, "*")):
rm_one(x)
else:
if create:
os.makedirs(d)
return d
def check_file(d):
    # find_file may return None; treat that as a missing file instead of crashing in abspath()
    if d is None:
        return False
    d = os.path.abspath(d)
    return os.path.isfile(d)
def find_file(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
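# (Note: find_file returns None when no match is found; callers such as check_file must handle that case.)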
class Builder:
def __init__(self, options):
self.options = options
self.build_dir = check_dir(options.build_dir, create=True)
self.opencv_dir = check_dir(options.opencv_dir)
self.emscripten_dir = check_dir(options.emscripten_dir)
def get_toolchain_file(self):
return os.path.join(self.emscripten_dir, "cmake", "Modules", "Platform", "Emscripten.cmake")
def clean_build_dir(self):
for d in ["CMakeCache.txt", "CMakeFiles/", "bin/", "libs/", "lib/", "modules"]:
rm_one(d)
def get_cmake_cmd(self):
cmd = ["cmake",
"-DENABLE_PIC=FALSE", # To workaround emscripten upstream backend issue https://github.com/emscripten-core/emscripten/issues/8761
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_TOOLCHAIN_FILE='%s'" % self.get_toolchain_file(),
"-DCPU_BASELINE=''",
"-DCPU_DISPATCH=''",
"-DCV_TRACE=OFF",
"-DBUILD_SHARED_LIBS=OFF",
"-DWITH_1394=OFF",
"-DWITH_ADE=OFF",
"-DWITH_VTK=OFF",
"-DWITH_EIGEN=OFF",
"-DWITH_FFMPEG=OFF",
"-DWITH_GSTREAMER=OFF",
"-DWITH_GTK=OFF",
"-DWITH_GTK_2_X=OFF",
"-DWITH_IPP=OFF",
"-DWITH_JASPER=OFF",
"-DWITH_JPEG=OFF",
"-DWITH_WEBP=OFF",
"-DWITH_OPENEXR=OFF",
"-DWITH_OPENGL=OFF",
"-DWITH_OPENVX=OFF",
"-DWITH_OPENNI=OFF",
"-DWITH_OPENNI2=OFF",
"-DWITH_PNG=OFF",
"-DWITH_TBB=OFF",
"-DWITH_TIFF=OFF",
"-DWITH_V4L=OFF",
"-DWITH_OPENCL=OFF",
"-DWITH_OPENCL_SVM=OFF",
"-DWITH_OPENCLAMDFFT=OFF",
"-DWITH_OPENCLAMDBLAS=OFF",
"-DWITH_GPHOTO2=OFF",
"-DWITH_LAPACK=OFF",
"-DWITH_ITT=OFF",
"-DWITH_QUIRC=OFF",
"-DBUILD_ZLIB=ON",
"-DBUILD_opencv_apps=OFF",
"-DBUILD_opencv_calib3d=ON",
"-DBUILD_opencv_dnn=ON",
"-DBUILD_opencv_features2d=ON",
"-DBUILD_opencv_flann=ON", # No bindings provided. This module is used as a dependency for other modules.
"-DBUILD_opencv_gapi=OFF",
"-DBUILD_opencv_ml=OFF",
"-DBUILD_opencv_photo=ON",
"-DBUILD_opencv_imgcodecs=OFF",
"-DBUILD_opencv_shape=OFF",
"-DBUILD_opencv_videoio=OFF",
"-DBUILD_opencv_videostab=OFF",
"-DBUILD_opencv_highgui=OFF",
"-DBUILD_opencv_superres=OFF",
"-DBUILD_opencv_stitching=OFF",
"-DBUILD_opencv_java=OFF",
"-DBUILD_opencv_java_bindings_generator=OFF",
"-DBUILD_opencv_js=ON",
"-DBUILD_opencv_python2=OFF",
"-DBUILD_opencv_python3=OFF",
"-DBUILD_opencv_python_bindings_generator=OFF",
"-DBUILD_EXAMPLES=OFF",
"-DBUILD_PACKAGE=OFF",
"-DBUILD_TESTS=OFF",
"-DBUILD_PERF_TESTS=OFF"]
if self.options.cmake_option:
cmd += self.options.cmake_option
if self.options.build_doc:
cmd.append("-DBUILD_DOCS=ON")
else:
cmd.append("-DBUILD_DOCS=OFF")
if self.options.threads:
cmd.append("-DWITH_PTHREADS_PF=ON")
else:
cmd.append("-DWITH_PTHREADS_PF=OFF")
if self.options.simd:
cmd.append("-DCV_ENABLE_INTRINSICS=ON")
else:
cmd.append("-DCV_ENABLE_INTRINSICS=OFF")
if self.options.build_wasm_intrin_test:
cmd.append("-DBUILD_WASM_INTRIN_TESTS=ON")
else:
cmd.append("-DBUILD_WASM_INTRIN_TESTS=OFF")
flags = self.get_build_flags()
if flags:
cmd += ["-DCMAKE_C_FLAGS='%s'" % flags,
"-DCMAKE_CXX_FLAGS='%s'" % flags]
return cmd
def get_build_flags(self):
flags = ""
if self.options.build_wasm:
flags += "-s WASM=1 "
elif self.options.disable_wasm:
flags += "-s WASM=0 "
if self.options.threads:
flags += "-s USE_PTHREADS=1 -s PTHREAD_POOL_SIZE=4 "
else:
flags += "-s USE_PTHREADS=0 "
if self.options.enable_exception:
flags += "-s DISABLE_EXCEPTION_CATCHING=0 "
if self.options.simd:
flags += "-msimd128 "
if self.options.build_flags:
flags += self.options.build_flags
return flags
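# For example, invoking this script with --build_wasm --threads --simd makes
# get_build_flags() return "-s WASM=1 -s USE_PTHREADS=1 -s PTHREAD_POOL_SIZE=4 -msimd128 ".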
def config(self):
cmd = self.get_cmake_cmd()
cmd.append(self.opencv_dir)
execute(cmd)
def build_opencvjs(self):
execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv.js"])
def build_test(self):
execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_test"])
def build_perf(self):
execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_perf"])
def build_doc(self):
execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"])
#===================================================================================================
if __name__ == "__main__":
opencv_dir = os.path.abspath(os.path.join(SCRIPT_DIR, '../..'))
emscripten_dir = None
if "EMSCRIPTEN" in os.environ:
emscripten_dir = os.environ["EMSCRIPTEN"]
parser = argparse.ArgumentParser(description='Build OpenCV.js with Emscripten')
parser.add_argument("build_dir", help="Building directory (and output)")
parser.add_argument('--opencv_dir', default=opencv_dir, help='OpenCV source directory (default is "../.." relative to script location)')
parser.add_argument('--emscripten_dir', default=emscripten_dir, help="Path to Emscripten to use for build")
parser.add_argument('--build_wasm', action="store_true", help="Build OpenCV.js in WebAssembly format")
parser.add_argument('--disable_wasm', action="store_true", help="Build OpenCV.js in Asm.js format")
parser.add_argument('--threads', action="store_true", help="Build OpenCV.js with threads optimization")
parser.add_argument('--simd', action="store_true", help="Build OpenCV.js with SIMD optimization")
parser.add_argument('--build_test', action="store_true", help="Build tests")
parser.add_argument('--build_perf', action="store_true", help="Build performance tests")
parser.add_argument('--build_doc', action="store_true", help="Build tutorials")
parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir")
parser.add_argument('--skip_config', action="store_true", help="Skip cmake config")
parser.add_argument('--config_only', action="store_true", help="Only do cmake config")
parser.add_argument('--enable_exception', action="store_true", help="Enable exception handling")
# Use --cmake_option="-D...=ON" for a single definition; repeat the flag to pass more options
parser.add_argument('--cmake_option', action='append', help="Append CMake options")
# Use --build_flags="-s USE_PTHREADS=0 -Os" to pass one or more Emscripten flags in a single quoted string
parser.add_argument('--build_flags', help="Append Emscripten build options")
parser.add_argument('--build_wasm_intrin_test', default=False, action="store_true", help="Build WASM intrin tests")
# Pass the path of a configuration file as the argument of this flag
parser.add_argument('--config', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'opencv_js.config.py'),
help="Specify configuration file with own list of exported into JS functions")
args = parser.parse_args()
log.basicConfig(format='%(message)s', level=log.DEBUG)
log.debug("Args: %s", args)
os.environ["OPENCV_JS_WHITELIST"] = args.config
if args.emscripten_dir is None:
log.info("Cannot get Emscripten path, please specify it either by EMSCRIPTEN environment variable or --emscripten_dir option.")
sys.exit(-1)
builder = Builder(args)
os.chdir(builder.build_dir)
if args.clean_build_dir:
log.info("=====")
log.info("===== Clean build dir %s", builder.build_dir)
log.info("=====")
builder.clean_build_dir()
if not args.skip_config:
target = "default target"
if args.build_wasm:
target = "wasm"
elif args.disable_wasm:
target = "asm.js"
log.info("=====")
log.info("===== Config OpenCV.js build for %s" % target)
log.info("=====")
builder.config()
if args.config_only:
sys.exit(0)
log.info("=====")
log.info("===== Building OpenCV.js")
log.info("=====")
builder.build_opencvjs()
if args.build_test:
log.info("=====")
log.info("===== Building OpenCV.js tests")
log.info("=====")
builder.build_test()
if args.build_perf:
log.info("=====")
log.info("===== Building OpenCV.js performance tests")
log.info("=====")
builder.build_perf()
if args.build_doc:
log.info("=====")
log.info("===== Building OpenCV.js tutorials")
log.info("=====")
builder.build_doc()
log.info("=====")
log.info("===== Build finished")
log.info("=====")
opencvjs_path = os.path.join(builder.build_dir, "bin", "opencv.js")
if check_file(opencvjs_path):
log.info("OpenCV.js location: %s", opencvjs_path)
if args.build_test:
opencvjs_test_path = os.path.join(builder.build_dir, "bin", "tests.html")
if check_file(opencvjs_test_path):
log.info("OpenCV.js tests location: %s", opencvjs_test_path)
if args.build_perf:
opencvjs_perf_path = os.path.join(builder.build_dir, "bin", "perf")
opencvjs_perf_base_path = os.path.join(builder.build_dir, "bin", "perf", "base.js")
if check_file(opencvjs_perf_base_path):
log.info("OpenCV.js performance tests location: %s", opencvjs_perf_path)
if args.build_doc:
opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html"))
if check_file(opencvjs_tutorial_path):
log.info("OpenCV.js tutorials location: %s", opencvjs_tutorial_path)
|
#!/usr/bin/env python
"""
The script builds OpenCV.framework for iOS.
The built framework is universal, it can be used to build app and run it on either iOS simulator or real device.
Usage:
./build_framework.py <outputdir>
By cmake conventions (and especially if you work with OpenCV repository),
the output dir should not be a subdirectory of OpenCV source tree.
Script will create <outputdir>, if it's missing, and a few of its subdirectories:
<outputdir>
build/
iPhoneOS-*/
[cmake-generated build tree for an iOS device target]
iPhoneSimulator-*/
[cmake-generated build tree for iOS simulator]
opencv2.framework/
[the framework content]
The script should handle minor OpenCV updates efficiently
- it does not recompile the library from scratch each time.
However, opencv2.framework directory is erased and recreated on each run.
Adding --dynamic parameter will build opencv2.framework as App Store dynamic framework. Only iOS 8+ versions are supported.
"""
from __future__ import print_function
import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
from subprocess import check_call, check_output, CalledProcessError
IPHONEOS_DEPLOYMENT_TARGET='8.0' # default, can be changed via command line options or environment variable
def execute(cmd, cwd = None):
print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
print('Executing: ' + ' '.join(cmd))
retcode = check_call(cmd, cwd = cwd)
if retcode != 0:
raise Exception("Child returned:", retcode)
def getXCodeMajor():
    ret = check_output(["xcodebuild", "-version"])
    if not isinstance(ret, str):
        ret = ret.decode('utf-8')  # Python 3: check_output returns bytes
    m = re.match(r'Xcode\s+(\d+)\..*', ret, flags=re.IGNORECASE)
    if m:
        return int(m.group(1))
    raise Exception("Failed to parse Xcode version")
class Builder:
def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, disable, enablenonfree, targets, debug, debug_info):
self.opencv = os.path.abspath(opencv)
self.contrib = None
if contrib:
modpath = os.path.join(contrib, "modules")
if os.path.isdir(modpath):
self.contrib = os.path.abspath(modpath)
else:
print("Note: contrib repository is bad - modules subfolder not found", file=sys.stderr)
self.dynamic = dynamic
self.bitcodedisabled = bitcodedisabled
self.exclude = exclude
self.disable = disable
self.enablenonfree = enablenonfree
self.targets = targets
self.debug = debug
self.debug_info = debug_info
def getBD(self, parent, t):
if len(t[0]) == 1:
res = os.path.join(parent, 'build-%s-%s' % (t[0][0].lower(), t[1].lower()))
else:
res = os.path.join(parent, 'build-%s' % t[1].lower())
if not os.path.isdir(res):
os.makedirs(res)
return os.path.abspath(res)
def _build(self, outdir):
outdir = os.path.abspath(outdir)
if not os.path.isdir(outdir):
os.makedirs(outdir)
mainWD = os.path.join(outdir, "build")
dirs = []
xcode_ver = getXCodeMajor()
if self.dynamic:
alltargets = self.targets
else:
# if we are building a static library, we must build each architecture separately
alltargets = []
for t in self.targets:
for at in t[0]:
current = ( [at], t[1] )
alltargets.append(current)
for t in alltargets:
mainBD = self.getBD(mainWD, t)
dirs.append(mainBD)
cmake_flags = []
if self.contrib:
cmake_flags.append("-DOPENCV_EXTRA_MODULES_PATH=%s" % self.contrib)
if xcode_ver >= 7 and t[1] == 'iPhoneOS' and not self.bitcodedisabled:
cmake_flags.append("-DCMAKE_C_FLAGS=-fembed-bitcode")
cmake_flags.append("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
self.buildOne(t[0], t[1], mainBD, cmake_flags)
if not self.dynamic:
self.mergeLibs(mainBD)
self.makeFramework(outdir, dirs)
def build(self, outdir):
try:
self._build(outdir)
except Exception as e:
print("="*60, file=sys.stderr)
print("ERROR: %s" % e, file=sys.stderr)
print("="*60, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
def getToolchain(self, arch, target):
return None
def getConfiguration(self):
return "Debug" if self.debug else "Release"
def getCMakeArgs(self, arch, target):
args = [
"cmake",
"-GXcode",
"-DAPPLE_FRAMEWORK=ON",
"-DCMAKE_INSTALL_PREFIX=install",
"-DCMAKE_BUILD_TYPE=%s" % self.getConfiguration(),
"-DOPENCV_INCLUDE_INSTALL_PATH=include",
"-DOPENCV_3P_LIB_INSTALL_PATH=lib/3rdparty"
] + ([
"-DBUILD_SHARED_LIBS=ON",
"-DCMAKE_MACOSX_BUNDLE=ON",
"-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED=NO",
] if self.dynamic else []) + ([
"-DOPENCV_ENABLE_NONFREE=ON"
] if self.enablenonfree else []) + ([
"-DBUILD_WITH_DEBUG_INFO=ON"
] if self.debug_info else [])
if len(self.exclude) > 0:
args += ["-DBUILD_opencv_world=OFF"] if not self.dynamic else []
args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
if len(self.disable) > 0:
args += ["-DWITH_%s=OFF" % f for f in self.disable]
return args
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
]
if self.dynamic:
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=" + os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
"ONLY_ACTIVE_ARCH=NO",
]
if not self.bitcodedisabled:
buildcmd.append("BITCODE_GENERATION_MODE=bitcode")
for arch in archs:
buildcmd.append("-arch")
buildcmd.append(arch.lower())
else:
arch = ";".join(archs)
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=" + os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
"ARCHS=%s" % arch,
]
buildcmd += [
"-sdk", target.lower(),
"-configuration", self.getConfiguration(),
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count()),
] + (["-target","ALL_BUILD"] if self.dynamic else [])
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "ios", "Info.plist")
def buildOne(self, arch, target, builddir, cmakeargs = []):
# Run cmake
toolchain = self.getToolchain(arch, target)
cmakecmd = self.getCMakeArgs(arch, target) + \
(["-DCMAKE_TOOLCHAIN_FILE=%s" % toolchain] if toolchain is not None else [])
if target.lower().startswith("iphoneos"):
cmakecmd.append("-DCPU_BASELINE=DETECT")
cmakecmd.append(self.opencv)
cmakecmd.extend(cmakeargs)
execute(cmakecmd, cwd = builddir)
# Clean and build
clean_dir = os.path.join(builddir, "install")
if os.path.isdir(clean_dir):
shutil.rmtree(clean_dir)
buildcmd = self.getBuildCommand(arch, target)
execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir)
execute(["cmake", "-DBUILD_TYPE=%s" % self.getConfiguration(), "-P", "cmake_install.cmake"], cwd = builddir)
def mergeLibs(self, builddir):
res = os.path.join(builddir, "lib", self.getConfiguration(), "libopencv_merged.a")
libs = glob.glob(os.path.join(builddir, "install", "lib", "*.a"))
libs3 = glob.glob(os.path.join(builddir, "install", "lib", "3rdparty", "*.a"))
print("Merging libraries:\n\t%s" % "\n\t".join(libs + libs3), file=sys.stderr)
execute(["libtool", "-static", "-o", res] + libs + libs3)
def makeFramework(self, outdir, builddirs):
name = "opencv2"
# set the current dir to the dst root
framework_dir = os.path.join(outdir, "%s.framework" % name)
if os.path.isdir(framework_dir):
shutil.rmtree(framework_dir)
os.makedirs(framework_dir)
if self.dynamic:
dstdir = framework_dir
libname = "opencv2.framework/opencv2"
else:
dstdir = os.path.join(framework_dir, "Versions", "A")
libname = "libopencv_merged.a"
# copy headers from one of build folders
shutil.copytree(os.path.join(builddirs[0], "install", "include", "opencv2"), os.path.join(dstdir, "Headers"))
# make universal static lib
libs = [os.path.join(d, "lib", self.getConfiguration(), libname) for d in builddirs]
lipocmd = ["lipo", "-create"]
lipocmd.extend(libs)
lipocmd.extend(["-o", os.path.join(dstdir, name)])
print("Creating universal library from:\n\t%s" % "\n\t".join(libs), file=sys.stderr)
execute(lipocmd)
# dynamic framework has different structure, just copy the Plist directly
if self.dynamic:
resdir = dstdir
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
else:
# copy Info.plist
resdir = os.path.join(dstdir, "Resources")
os.makedirs(resdir)
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
# make symbolic links
links = [
(["A"], ["Versions", "Current"]),
(["Versions", "Current", "Headers"], ["Headers"]),
(["Versions", "Current", "Resources"], ["Resources"]),
(["Versions", "Current", name], [name])
]
for l in links:
s = os.path.join(*l[0])
d = os.path.join(framework_dir, *l[1])
os.symlink(s, d)
class iOSBuilder(Builder):
def getToolchain(self, arch, target):
toolchain = os.path.join(self.opencv, "platforms", "ios", "cmake", "Toolchains", "Toolchain-%s_Xcode.cmake" % target)
return toolchain
def getCMakeArgs(self, arch, target):
arch = ";".join(arch)
args = Builder.getCMakeArgs(self, arch, target)
args = args + [
'-DIOS_ARCH=%s' % arch
]
return args
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for iOS.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
parser.add_argument('--iphoneos_deployment_target', default=os.environ.get('IPHONEOS_DEPLOYMENT_TARGET', IPHONEOS_DEPLOYMENT_TARGET), help='specify IPHONEOS_DEPLOYMENT_TARGET')
parser.add_argument('--iphoneos_archs', default='armv7,armv7s,arm64', help='select iPhoneOS target ARCHS')
parser.add_argument('--iphonesimulator_archs', default='i386,x86_64', help='select iPhoneSimulator target ARCHS')
parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
parser.add_argument('--debug', default=False, dest='debug', action='store_true', help='Build "Debug" binaries (disabled by default)')
parser.add_argument('--debug_info', default=False, dest='debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
args = parser.parse_args()
os.environ['IPHONEOS_DEPLOYMENT_TARGET'] = args.iphoneos_deployment_target
print('Using IPHONEOS_DEPLOYMENT_TARGET=' + os.environ['IPHONEOS_DEPLOYMENT_TARGET'])
iphoneos_archs = args.iphoneos_archs.split(',')
print('Using iPhoneOS ARCHS=' + str(iphoneos_archs))
iphonesimulator_archs = args.iphonesimulator_archs.split(',')
print('Using iPhoneSimulator ARCHS=' + str(iphonesimulator_archs))
b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree,
[
(iphoneos_archs, "iPhoneOS"),
] if os.environ.get('BUILD_PRECOMMIT', None) else
[
(iphoneos_archs, "iPhoneOS"),
(iphonesimulator_archs, "iPhoneSimulator"),
], args.debug, args.debug_info)
b.build(args.out)
|
ABIs = [
ABI("2", "armeabi-v7a", None, 21, cmake_vars=dict(ANDROID_ABI='armeabi-v7a with NEON')),
ABI("3", "arm64-v8a", None, 21),
ABI("5", "x86_64", None, 21),
ABI("4", "x86", None, 21),
]
|
ABIs = [
ABI("2", "armeabi-v7a", None, cmake_vars=dict(ANDROID_ABI='armeabi-v7a with NEON')),
ABI("3", "arm64-v8a", None),
ABI("5", "x86_64", None),
ABI("4", "x86", None),
]
|
ABIs = [
ABI("2", "armeabi-v7a", None, cmake_vars=dict(ANDROID_ABI='armeabi-v7a with NEON')),
ABI("3", "arm64-v8a", None),
ABI("5", "x86_64", None),
ABI("4", "x86", None),
]
|
ABIs = [
ABI("2", "armeabi-v7a", "arm-linux-androideabi-4.8", cmake_vars=dict(ANDROID_ABI='armeabi-v7a with NEON')),
ABI("1", "armeabi", "arm-linux-androideabi-4.8"),
ABI("3", "arm64-v8a", "aarch64-linux-android-4.9"),
ABI("5", "x86_64", "x86_64-4.9"),
ABI("4", "x86", "x86-4.8"),
ABI("7", "mips64", "mips64el-linux-android-4.9"),
ABI("6", "mips", "mipsel-linux-android-4.8")
]
|
ABIs = [
ABI("2", "armeabi-v7a", "arm-linux-androideabi-4.9", cmake_vars=dict(ANDROID_ABI='armeabi-v7a with NEON')),
ABI("1", "armeabi", "arm-linux-androideabi-4.9", cmake_vars=dict(WITH_TBB='OFF')),
ABI("3", "arm64-v8a", "aarch64-linux-android-4.9"),
ABI("5", "x86_64", "x86_64-4.9"),
ABI("4", "x86", "x86-4.9"),
]
|
#!/usr/bin/env python
import os, sys
import argparse
import glob
import re
import shutil
import subprocess
import time
import logging as log
import xml.etree.ElementTree as ET
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
class Fail(Exception):
def __init__(self, text=None):
self.t = text
def __str__(self):
return "ERROR" if self.t is None else self.t
def execute(cmd, shell=False):
try:
log.debug("Executing: %s" % cmd)
log.info('Executing: ' + ' '.join(cmd))
retcode = subprocess.call(cmd, shell=shell)
if retcode < 0:
raise Fail("Child was terminated by signal: %s" % -retcode)
elif retcode > 0:
raise Fail("Child returned: %s" % retcode)
except OSError as e:
raise Fail("Execution failed: %d / %s" % (e.errno, e.strerror))
def rm_one(d):
d = os.path.abspath(d)
if os.path.exists(d):
if os.path.isdir(d):
log.info("Removing dir: %s", d)
shutil.rmtree(d)
elif os.path.isfile(d):
log.info("Removing file: %s", d)
os.remove(d)
def check_dir(d, create=False, clean=False):
d = os.path.abspath(d)
log.info("Check dir %s (create: %s, clean: %s)", d, create, clean)
if os.path.exists(d):
if not os.path.isdir(d):
raise Fail("Not a directory: %s" % d)
if clean:
for x in glob.glob(os.path.join(d, "*")):
rm_one(x)
else:
if create:
os.makedirs(d)
return d
def check_executable(cmd):
    try:
        log.debug("Executing: %s" % cmd)
        result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(result, str):
            result = result.decode('utf-8', errors='replace')  # Python 3: bytes -> str
        log.debug("Result: %s" % (result + '\n').split('\n')[0])
        return True
    except Exception as e:
        log.debug('Failed: %s' % e)
        return False
def determine_opencv_version(version_hpp_path):
# version in 2.4 - CV_VERSION_EPOCH.CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION
# version in master - CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION-CV_VERSION_STATUS
with open(version_hpp_path, "rt") as f:
data = f.read()
major = re.search(r'^#define\W+CV_VERSION_MAJOR\W+(\d+)$', data, re.MULTILINE).group(1)
minor = re.search(r'^#define\W+CV_VERSION_MINOR\W+(\d+)$', data, re.MULTILINE).group(1)
revision = re.search(r'^#define\W+CV_VERSION_REVISION\W+(\d+)$', data, re.MULTILINE).group(1)
version_status = re.search(r'^#define\W+CV_VERSION_STATUS\W+"([^"]*)"$', data, re.MULTILINE).group(1)
return "%(major)s.%(minor)s.%(revision)s%(version_status)s" % locals()
# shutil.move fails if dst exists
def move_smart(src, dst):
def move_recurse(subdir):
s = os.path.join(src, subdir)
d = os.path.join(dst, subdir)
if os.path.exists(d):
if os.path.isdir(d):
for item in os.listdir(s):
move_recurse(os.path.join(subdir, item))
elif os.path.isfile(s):
shutil.move(s, d)
else:
shutil.move(s, d)
move_recurse('')
# shutil.copytree fails if dst exists
def copytree_smart(src, dst):
def copy_recurse(subdir):
s = os.path.join(src, subdir)
d = os.path.join(dst, subdir)
if os.path.exists(d):
if os.path.isdir(d):
for item in os.listdir(s):
copy_recurse(os.path.join(subdir, item))
elif os.path.isfile(s):
shutil.copy2(s, d)
else:
if os.path.isdir(s):
shutil.copytree(s, d)
elif os.path.isfile(s):
shutil.copy2(s, d)
copy_recurse('')
#===================================================================================================
class ABI:
def __init__(self, platform_id, name, toolchain, ndk_api_level = None, cmake_vars = dict()):
self.platform_id = platform_id # platform code to add to apk version (for cmake)
self.name = name # general name (official Android ABI identifier)
self.toolchain = toolchain # toolchain identifier (for cmake)
self.cmake_vars = dict(
ANDROID_STL="gnustl_static",
ANDROID_ABI=self.name,
ANDROID_PLATFORM_ID=platform_id,
)
if toolchain is not None:
self.cmake_vars['ANDROID_TOOLCHAIN_NAME'] = toolchain
else:
self.cmake_vars['ANDROID_TOOLCHAIN'] = 'clang'
self.cmake_vars['ANDROID_STL'] = 'c++_shared'
if ndk_api_level:
self.cmake_vars['ANDROID_NATIVE_API_LEVEL'] = ndk_api_level
self.cmake_vars.update(cmake_vars)
def __str__(self):
return "%s (%s)" % (self.name, self.toolchain)
def haveIPP(self):
return self.name == "x86" or self.name == "x86_64"
#===================================================================================================
class Builder:
def __init__(self, workdir, opencvdir, config):
self.workdir = check_dir(workdir, create=True)
self.opencvdir = check_dir(opencvdir)
self.config = config
self.libdest = check_dir(os.path.join(self.workdir, "o4a"), create=True, clean=True)
self.resultdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk'), create=True, clean=True)
self.docdest = check_dir(os.path.join(self.workdir, 'OpenCV-android-sdk', 'sdk', 'java', 'javadoc'), create=True, clean=True)
self.extra_packs = []
self.opencv_version = determine_opencv_version(os.path.join(self.opencvdir, "modules", "core", "include", "opencv2", "core", "version.hpp"))
self.use_ccache = not config.no_ccache
self.cmake_path = self.get_cmake()
self.ninja_path = self.get_ninja()
self.debug = bool(config.debug)
self.debug_info = bool(config.debug_info)
self.no_samples_build = bool(config.no_samples_build)
def get_cmake(self):
if not self.config.use_android_buildtools and check_executable(['cmake', '--version']):
log.info("Using cmake from PATH")
return 'cmake'
# look to see if Android SDK's cmake is installed
android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake')
if os.path.exists(android_cmake):
cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'cmake'), '--version'])]
if len(cmake_subdirs) > 0:
# there could be more than one - just take the first one
cmake_from_sdk = os.path.join(android_cmake, cmake_subdirs[0], 'bin', 'cmake')
log.info("Using cmake from Android SDK: %s", cmake_from_sdk)
return cmake_from_sdk
raise Fail("Can't find cmake")
def get_ninja(self):
if not self.config.use_android_buildtools and check_executable(['ninja', '--version']):
log.info("Using ninja from PATH")
return 'ninja'
# Android SDK's cmake includes a copy of ninja - look to see if its there
android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake')
if os.path.exists(android_cmake):
cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'ninja'), '--version'])]
if len(cmake_subdirs) > 0:
# there could be more than one - just take the first one
ninja_from_sdk = os.path.join(android_cmake, cmake_subdirs[0], 'bin', 'ninja')
log.info("Using ninja from Android SDK: %s", ninja_from_sdk)
return ninja_from_sdk
raise Fail("Can't find ninja")
def get_toolchain_file(self):
if not self.config.force_opencv_toolchain:
toolchain = os.path.join(os.environ['ANDROID_NDK'], 'build', 'cmake', 'android.toolchain.cmake')
if os.path.exists(toolchain):
return toolchain
toolchain = os.path.join(SCRIPT_DIR, "android.toolchain.cmake")
if os.path.exists(toolchain):
return toolchain
else:
raise Fail("Can't find toolchain")
def get_engine_apk_dest(self, engdest):
return os.path.join(engdest, "platforms", "android", "service", "engine", ".build")
def add_extra_pack(self, ver, path):
if path is None:
return
self.extra_packs.append((ver, check_dir(path)))
def clean_library_build_dir(self):
for d in ["CMakeCache.txt", "CMakeFiles/", "bin/", "libs/", "lib/", "package/", "install/samples/"]:
rm_one(d)
def build_library(self, abi, do_install):
cmd = [self.cmake_path, "-GNinja"]
cmake_vars = dict(
CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(),
INSTALL_CREATE_DISTRIB="ON",
WITH_OPENCL="OFF",
WITH_IPP=("ON" if abi.haveIPP() else "OFF"),
WITH_TBB="ON",
BUILD_EXAMPLES="OFF",
BUILD_TESTS="OFF",
BUILD_PERF_TESTS="OFF",
BUILD_DOCS="OFF",
BUILD_ANDROID_EXAMPLES=("OFF" if self.no_samples_build else "ON"),
INSTALL_ANDROID_EXAMPLES="ON",
)
if self.ninja_path != 'ninja':
cmake_vars['CMAKE_MAKE_PROGRAM'] = self.ninja_path
if self.debug:
cmake_vars['CMAKE_BUILD_TYPE'] = "Debug"
if self.debug_info: # Release with debug info
cmake_vars['BUILD_WITH_DEBUG_INFO'] = "ON"
if self.config.modules_list is not None:
cmd.append("-DBUILD_LIST='%s'" % self.config.modules_list)
if self.config.extra_modules_path is not None:
cmd.append("-DOPENCV_EXTRA_MODULES_PATH='%s'" % self.config.extra_modules_path)
if self.use_ccache:
cmd.append("-DNDK_CCACHE=ccache")
if do_install:
cmd.extend(["-DBUILD_TESTS=ON", "-DINSTALL_TESTS=ON"])
cmake_vars.update(abi.cmake_vars)
cmd += [ "-D%s='%s'" % (k, v) for (k, v) in cmake_vars.items() if v is not None]
cmd.append(self.opencvdir)
execute(cmd)
# full parallelism for C++ compilation tasks
execute([self.ninja_path, "opencv_modules"])
# limit parallelism for building samples (avoid huge memory consumption)
if self.no_samples_build:
execute([self.ninja_path, "install" if (self.debug_info or self.debug) else "install/strip"])
else:
execute([self.ninja_path, "-j1" if (self.debug_info or self.debug) else "-j3", "install" if (self.debug_info or self.debug) else "install/strip"])
def build_javadoc(self):
classpaths = []
for dir, _, files in os.walk(os.environ["ANDROID_SDK"]):
for f in files:
if f == "android.jar" or f == "annotations.jar":
classpaths.append(os.path.join(dir, f))
srcdir = os.path.join(self.resultdest, 'sdk', 'java', 'src')
dstdir = self.docdest
# synchronize with modules/java/jar/build.xml.in
shutil.copy2(os.path.join(SCRIPT_DIR, '../../doc/mymath.js'), dstdir)
cmd = [
"javadoc",
'-windowtitle', 'OpenCV %s Java documentation' % self.opencv_version,
'-doctitle', 'OpenCV Java documentation (%s)' % self.opencv_version,
"-nodeprecated",
"-public",
'-sourcepath', srcdir,
'-encoding', 'UTF-8',
'-charset', 'UTF-8',
'-docencoding', 'UTF-8',
'--allow-script-in-comments',
'-header',
'''
<script>
var url = window.location.href;
var pos = url.lastIndexOf('/javadoc/');
url = pos >= 0 ? (url.substring(0, pos) + '/javadoc/mymath.js') : (window.location.origin + '/mymath.js');
var script = document.createElement('script');
script.src = '%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML,' + url;
document.getElementsByTagName('head')[0].appendChild(script);
</script>
''' % 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0',
'-bottom', 'Generated on %s / OpenCV %s' % (time.strftime("%Y-%m-%d %H:%M:%S"), self.opencv_version),
"-d", dstdir,
"-classpath", ":".join(classpaths),
'-subpackages', 'org.opencv',
]
execute(cmd)
def gather_results(self):
# Copy all files
root = os.path.join(self.libdest, "install")
for item in os.listdir(root):
src = os.path.join(root, item)
dst = os.path.join(self.resultdest, item)
if os.path.isdir(src):
log.info("Copy dir: %s", item)
if self.config.force_copy:
copytree_smart(src, dst)
else:
move_smart(src, dst)
elif os.path.isfile(src):
log.info("Copy file: %s", item)
if self.config.force_copy:
shutil.copy2(src, dst)
else:
shutil.move(src, dst)
#===================================================================================================
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build OpenCV for Android SDK')
parser.add_argument("work_dir", nargs='?', default='.', help="Working directory (and output)")
parser.add_argument("opencv_dir", nargs='?', default=os.path.join(SCRIPT_DIR, '../..'), help="Path to OpenCV source dir")
parser.add_argument('--config', default='ndk-18-api-level-21.config.py', type=str, help="Package build configuration")
parser.add_argument('--ndk_path', help="Path to Android NDK to use for build")
parser.add_argument('--sdk_path', help="Path to Android SDK to use for build")
parser.add_argument('--use_android_buildtools', action="store_true", help='Use cmake/ninja build tools from Android SDK')
parser.add_argument("--modules_list", help="List of modules to include for build")
parser.add_argument("--extra_modules_path", help="Path to extra modules to use for build")
parser.add_argument('--sign_with', help="Certificate to sign the Manager apk")
parser.add_argument('--build_doc', action="store_true", help="Build javadoc")
parser.add_argument('--no_ccache', action="store_true", help="Do not use ccache during library build")
parser.add_argument('--force_copy', action="store_true", help="Do not use file move during library build (useful for debug)")
parser.add_argument('--force_opencv_toolchain', action="store_true", help="Do not use toolchain from Android NDK")
parser.add_argument('--debug', action="store_true", help="Build 'Debug' binaries (CMAKE_BUILD_TYPE=Debug)")
parser.add_argument('--debug_info', action="store_true", help="Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)")
parser.add_argument('--no_samples_build', action="store_true", help="Do not build samples (speeds up build)")
args = parser.parse_args()
log.basicConfig(format='%(message)s', level=log.DEBUG)
log.debug("Args: %s", args)
if args.ndk_path is not None:
os.environ["ANDROID_NDK"] = args.ndk_path
if args.sdk_path is not None:
os.environ["ANDROID_SDK"] = args.sdk_path
if 'ANDROID_HOME' not in os.environ and 'ANDROID_SDK' in os.environ:
os.environ['ANDROID_HOME'] = os.environ["ANDROID_SDK"]
if 'ANDROID_SDK' not in os.environ:
raise Fail("SDK location not set. Either pass --sdk_path or set ANDROID_SDK environment variable")
# look for an NDK installed with the Android SDK
if 'ANDROID_NDK' not in os.environ and 'ANDROID_SDK' in os.environ and os.path.exists(os.path.join(os.environ["ANDROID_SDK"], 'ndk-bundle')):
os.environ['ANDROID_NDK'] = os.path.join(os.environ["ANDROID_SDK"], 'ndk-bundle')
if 'ANDROID_NDK' not in os.environ:
raise Fail("NDK location not set. Either pass --ndk_path or set ANDROID_NDK environment variable")
if not check_executable(['ccache', '--version']):
log.info("ccache not found - disabling ccache support")
args.no_ccache = True
if os.path.realpath(args.work_dir) == os.path.realpath(SCRIPT_DIR):
raise Fail("Specify workdir (building from script directory is not supported)")
if os.path.realpath(args.work_dir) == os.path.realpath(args.opencv_dir):
raise Fail("Specify workdir (building from OpenCV source directory is not supported)")
# Relative paths become invalid in sub-directories
if args.opencv_dir is not None and not os.path.isabs(args.opencv_dir):
args.opencv_dir = os.path.abspath(args.opencv_dir)
if args.extra_modules_path is not None and not os.path.isabs(args.extra_modules_path):
args.extra_modules_path = os.path.abspath(args.extra_modules_path)
cpath = args.config
if not os.path.exists(cpath):
cpath = os.path.join(SCRIPT_DIR, cpath)
if not os.path.exists(cpath):
raise Fail('Config "%s" is missing' % args.config)
with open(cpath, 'r') as f:
cfg = f.read()
print("Package configuration:")
print('=' * 80)
print(cfg.strip())
print('=' * 80)
ABIs = None # make flake8 happy
exec(compile(cfg, cpath, 'exec'))
log.info("Android NDK path: %s", os.environ["ANDROID_NDK"])
log.info("Android SDK path: %s", os.environ["ANDROID_SDK"])
builder = Builder(args.work_dir, args.opencv_dir, args)
log.info("Detected OpenCV version: %s", builder.opencv_version)
for i, abi in enumerate(ABIs):
do_install = (i == 0)
log.info("=====")
log.info("===== Building library for %s", abi)
log.info("=====")
os.chdir(builder.libdest)
builder.clean_library_build_dir()
builder.build_library(abi, do_install)
builder.gather_results()
if args.build_doc:
builder.build_javadoc()
log.info("=====")
log.info("===== Build finished")
log.info("=====")
log.info("SDK location: %s", builder.resultdest)
log.info("Documentation location: %s", builder.docdest)
|
#!/usr/bin/env python
import unittest
import os, sys, subprocess, argparse, shutil, re
import logging as log
log.basicConfig(format='%(message)s', level=log.DEBUG)
CMAKE_TEMPLATE='''\
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
# Enable C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
SET(PROJECT_NAME hello-android)
PROJECT(${PROJECT_NAME})
FIND_PACKAGE(OpenCV REQUIRED %(libset)s)
FILE(GLOB srcs "*.cpp")
ADD_EXECUTABLE(${PROJECT_NAME} ${srcs})
TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${OpenCV_LIBS} dl z)
'''
CPP_TEMPLATE = '''\
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
const char* message = "Hello Android!";
int main(int argc, char* argv[])
{
(void)argc; (void)argv;
printf("%s\\n", message);
Size textsize = getTextSize(message, FONT_HERSHEY_COMPLEX, 3, 5, 0);
Mat img(textsize.height + 20, textsize.width + 20, CV_32FC1, Scalar(230,230,230));
putText(img, message, Point(10, img.rows - 10), FONT_HERSHEY_COMPLEX, 3, Scalar(0, 0, 0), 5);
imwrite("/mnt/sdcard/HelloAndroid.png", img);
return 0;
}
'''
#===================================================================================================
class TestCmakeBuild(unittest.TestCase):
def __init__(self, libset, abi, cmake_vars, opencv_cmake_path, workdir, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.libset = libset
self.abi = abi
self.cmake_vars = cmake_vars
self.opencv_cmake_path = opencv_cmake_path
self.workdir = workdir
self.srcdir = os.path.join(self.workdir, "src")
self.bindir = os.path.join(self.workdir, "build")
def shortDescription(self):
return "ABI: %s, LIBSET: %s" % (self.abi, self.libset)
def getCMakeToolchain(self):
    toolchain = os.path.join(os.environ['ANDROID_NDK'], 'build', 'cmake', 'android.toolchain.cmake')
    if os.path.exists(toolchain):
        return toolchain
    toolchain = os.path.join(self.opencv_cmake_path, "android.toolchain.cmake")
    if os.path.exists(toolchain):
        return toolchain
    raise Exception("Can't find toolchain")
def gen_cmakelists(self):
return CMAKE_TEMPLATE % {"libset": self.libset}
def gen_code(self):
return CPP_TEMPLATE
def write_src_file(self, fname, content):
with open(os.path.join(self.srcdir, fname), "w") as f:
f.write(content)
def setUp(self):
if os.path.exists(self.workdir):
shutil.rmtree(self.workdir)
os.mkdir(self.workdir)
os.mkdir(self.srcdir)
os.mkdir(self.bindir)
self.write_src_file("CMakeLists.txt", self.gen_cmakelists())
self.write_src_file("main.cpp", self.gen_code())
os.chdir(self.bindir)
def tearDown(self):
pass
#if os.path.exists(self.workdir):
# shutil.rmtree(self.workdir)
def runTest(self):
cmd = [
"cmake",
"-GNinja",
"-DOpenCV_DIR=%s" % self.opencv_cmake_path,
"-DCMAKE_TOOLCHAIN_FILE=%s" % self.getCMakeToolchain(),
self.srcdir
] + [ "-D{}={}".format(key, value) for key, value in self.cmake_vars.items() ]
log.info("Executing: %s" % cmd)
retcode = subprocess.call(cmd)
self.assertEqual(retcode, 0, "cmake failed")
cmd = ["ninja", "-v"]
log.info("Executing: %s" % cmd)
retcode = subprocess.call(cmd)
self.assertEqual(retcode, 0, "make failed")
def suite(workdir, opencv_cmake_path):
abis = {
"armeabi-v7a": { "ANDROID_ABI": "armeabi-v7a", "ANDROID_TOOLCHAIN": "clang", "ANDROID_STL": "c++_shared", 'ANDROID_NATIVE_API_LEVEL': "21" },
"arm64-v8a": { "ANDROID_ABI": "arm64-v8a", "ANDROID_TOOLCHAIN": "clang", "ANDROID_STL": "c++_shared", 'ANDROID_NATIVE_API_LEVEL': "21" },
"x86": { "ANDROID_ABI": "x86", "ANDROID_TOOLCHAIN": "clang", "ANDROID_STL": "c++_shared", 'ANDROID_NATIVE_API_LEVEL': "21" },
"x86_64": { "ANDROID_ABI": "x86_64", "ANDROID_TOOLCHAIN": "clang", "ANDROID_STL": "c++_shared", 'ANDROID_NATIVE_API_LEVEL': "21" },
}
suite = unittest.TestSuite()
for libset in ["", "opencv_java"]:
for abi, cmake_vars in abis.items():
suite.addTest(TestCmakeBuild(libset, abi, cmake_vars, opencv_cmake_path,
os.path.join(workdir, "{}-{}".format(abi, "static" if libset == "" else "shared"))))
return suite
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test OpenCV for Android SDK with cmake')
parser.add_argument('--sdk_path', help="Path to Android SDK to use for build")
parser.add_argument('--ndk_path', help="Path to Android NDK to use for build")
parser.add_argument("--workdir", default="testspace", help="Working directory (and output)")
parser.add_argument("opencv_cmake_path", help="Path to folder with OpenCVConfig.cmake and android.toolchain.cmake (usually <SDK>/sdk/native/jni/")
args = parser.parse_args()
if args.sdk_path is not None:
os.environ["ANDROID_SDK"] = os.path.abspath(args.sdk_path)
if args.ndk_path is not None:
os.environ["ANDROID_NDK"] = os.path.abspath(args.ndk_path)
if 'ANDROID_HOME' not in os.environ and 'ANDROID_SDK' in os.environ:
os.environ['ANDROID_HOME'] = os.environ["ANDROID_SDK"]
print("Using SDK: %s" % os.environ["ANDROID_SDK"])
print("Using NDK: %s" % os.environ["ANDROID_NDK"])
workdir = os.path.abspath(args.workdir)
if not os.path.exists(workdir):
os.mkdir(workdir)
res = unittest.TextTestRunner(verbosity=3).run(suite(workdir, os.path.abspath(args.opencv_cmake_path)))
if not res.wasSuccessful():
sys.exit(res)
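# Example invocation (a sketch; paths are illustrative):
#   python <this_script> --ndk_path ~/android-ndk --sdk_path ~/android-sdk ~/OpenCV-android-sdk/sdk/native/jni/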
|
#!/usr/bin/env python
'''
sample for discrete fourier transform (dft)
USAGE:
dft.py <image_file>
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def shift_dft(src, dst=None):
'''
Rearrange the quadrants of Fourier image so that the origin is at
the image center. Swaps quadrant 1 with 3, and 2 with 4.
src and dst arrays must be equal size & type
'''
if dst is None:
dst = np.empty(src.shape, src.dtype)
elif src.shape != dst.shape:
raise ValueError("src and dst must have equal sizes")
elif src.dtype != dst.dtype:
raise TypeError("src and dst must have equal types")
if src is dst:
ret = np.empty(src.shape, src.dtype)
else:
ret = dst
h, w = src.shape[:2]
cx1 = cx2 = w // 2
cy1 = cy2 = h // 2
# if the size is odd, then adjust the bottom/right quadrants
if w % 2 != 0:
cx2 += 1
if h % 2 != 0:
cy2 += 1
# swap quadrants
# swap q1 and q3
ret[h-cy1:, w-cx1:] = src[0:cy1 , 0:cx1 ] # q1 -> q3
ret[0:cy2 , 0:cx2 ] = src[h-cy2:, w-cx2:] # q3 -> q1
# swap q2 and q4
ret[0:cy2 , w-cx2:] = src[h-cy2:, 0:cx2 ] # q2 -> q4
ret[h-cy1:, 0:cx1 ] = src[0:cy1 , w-cx1:] # q4 -> q2
if src is dst:
dst[:,:] = ret
return dst
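# Example: for an even-sized array the four quadrants swap in place, e.g.
#   a = np.arange(16.).reshape(4, 4); shift_dft(a, a)
# moves the top-left 2x2 block to the bottom-right and vice versa.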
def main():
if len(sys.argv) > 1:
fname = sys.argv[1]
else:
fname = 'baboon.jpg'
print("usage : python dft.py <image_file>")
im = cv.imread(cv.samples.findFile(fname))
# convert to grayscale
im = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
h, w = im.shape[:2]
realInput = im.astype(np.float64)
# perform an optimally sized dft
dft_M = cv.getOptimalDFTSize(w)
dft_N = cv.getOptimalDFTSize(h)
# copy A to dft_A and pad dft_A with zeros
dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
dft_A[:h, :w, 0] = realInput
# no need to pad bottom part of dft_A with zeros because of
# use of nonzeroRows parameter in cv.dft()
cv.dft(dft_A, dst=dft_A, nonzeroRows=h)
cv.imshow("win", im)
# Split fourier into real and imaginary parts
image_Re, image_Im = cv.split(dft_A)
# Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
magnitude = cv.sqrt(image_Re**2.0 + image_Im**2.0)
# Compute log(1 + Mag)
log_spectrum = cv.log(1.0 + magnitude)
# Rearrange the quadrants of Fourier image so that the origin is at
# the image center
shift_dft(log_spectrum, log_spectrum)
# normalize and display the results as rgb
cv.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv.NORM_MINMAX)
cv.imshow("magnitude", log_spectrum)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
browse.py
=========
Sample shows how to implement simple high-resolution image navigation
Usage
-----
browse.py [image filename]
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
# built-in modules
import sys
def main():
if len(sys.argv) > 1:
fn = cv.samples.findFile(sys.argv[1])
print('loading %s ...' % fn)
img = cv.imread(fn)
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
else:
sz = 4096
print('generating %dx%d procedural image ...' % (sz, sz))
img = np.zeros((sz, sz), np.uint8)
track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
track = np.int32(track*10 + (sz/2, sz/2))
cv.polylines(img, [track], 0, 255, 1, cv.LINE_AA)
small = img
for _i in xrange(3):
small = cv.pyrDown(small)
def onmouse(event, x, y, flags, param):
h, _w = img.shape[:2]
h1, _w1 = small.shape[:2]
x, y = 1.0*x*h/h1, 1.0*y*h/h1
zoom = cv.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
cv.imshow('zoom', zoom)
cv.imshow('preview', small)
cv.setMouseCallback('preview', onmouse)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This program demonstrates Laplace point/edge detection using
the OpenCV function Laplacian()
It captures from the camera of your choice: 0, 1, ... default 0
Usage:
python laplace.py <ddepth> <smoothType> <sigma>
If no arguments given default arguments will be used.
Keyboard Shortcuts:
Press the ESC key to exit the program.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def main():
# Declare the variables we are going to use
ddepth = cv.CV_16S
smoothType = "MedianBlur"
sigma = 3
if len(sys.argv) == 4:
    # argv values are strings; convert the numeric arguments
    ddepth = int(sys.argv[1])
    smoothType = sys.argv[2]
    sigma = int(sys.argv[3])
# Taking input from the camera
cap=cv.VideoCapture(0)
# Create Window and Trackbar
cv.namedWindow("Laplace of Image", cv.WINDOW_AUTOSIZE)
cv.createTrackbar("Kernel Size Bar", "Laplace of Image", sigma, 15, lambda x:x)
# Printing frame width, height and FPS
print("=="*40)
print("Frame Width: ", cap.get(cv.CAP_PROP_FRAME_WIDTH), "Frame Height: ", cap.get(cv.CAP_PROP_FRAME_HEIGHT), "FPS: ", cap.get(cv.CAP_PROP_FPS))
while True:
# Reading input from the camera
ret, frame = cap.read()
if not ret:
print("Can't open camera/video stream")
break
# Taking input/position from the trackbar
sigma = cv.getTrackbarPos("Kernel Size Bar", "Laplace of Image")
# Set the kernel size; "| 1" forces it to be odd, as the smoothing filters require
ksize = (sigma*5)|1
# Remove noise by blurring with the selected filter
if smoothType == "GAUSSIAN":
    smoothed = cv.GaussianBlur(frame, (ksize, ksize), sigma, sigma)
elif smoothType == "BLUR":
    smoothed = cv.blur(frame, (ksize, ksize))
else:  # "MedianBlur" (default); also covers unrecognized values
    smoothed = cv.medianBlur(frame, ksize)
# Apply the Laplace operator; pass ksize by keyword so 5 is not taken as the dst argument
laplace = cv.Laplacian(smoothed, ddepth, ksize=5)
# Convert back to uint8 for display; pass alpha by keyword for the same reason
result = cv.convertScaleAbs(laplace, alpha=(sigma+1)*0.25)
# Display Output
cv.imshow("Laplace of Image", result)
k = cv.waitKey(30)
if k == 27:
return
if __name__ == "__main__":
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
face detection using haar cascades
USAGE:
facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# local modules
from video import create_capture
from common import clock, draw_str
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
flags=cv.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
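# convert the (x, y, w, h) boxes returned by detectMultiScale to (x1, y1, x2, y2) corners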
rects[:,2:] += rects[:,:2]
return rects
def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv.rectangle(img, (x1, y1), (x2, y2), color, 2)
def main():
import sys, getopt
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try:
    video_src = video_src[0]
except IndexError:
    video_src = 0
args = dict(args)
cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")
cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
_ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.equalizeHist(gray)
t = clock()
rects = detect(gray, cascade)
vis = img.copy()
draw_rects(vis, rects, (0, 255, 0))
if not nested.empty():
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
vis_roi = vis[y1:y2, x1:x2]
subrects = detect(roi.copy(), nested)
draw_rects(vis_roi, subrects, (255, 0, 0))
dt = clock() - t
draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
cv.imshow('facedetect', vis)
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Inpainting sample.
Inpainting repairs damage to images by floodfilling
the damage with surrounding image areas.
Usage:
inpaint.py [<image>]
Keys:
SPACE - inpaint
r - reset the inpainting mask
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from common import Sketcher
def main():
import sys
try:
    fn = sys.argv[1]
except IndexError:
    fn = 'fruits.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
img_mark = img.copy()
mark = np.zeros(img.shape[:2], np.uint8)
sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255))
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord(' '):
res = cv.inpaint(img_mark, mark, 3, cv.INPAINT_TELEA)
cv.imshow('inpaint', res)
if ch == ord('r'):
img_mark[:] = img
mark[:] = 0
sketch.show()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution
Benefit : Learn how to draw histogram of images
Get familiar with cv.calcHist, cv.equalizeHist, cv.normalize and some drawing functions
Level : Beginner or Intermediate
Functions : 1) hist_curve : returns histogram of an image drawn as curves
2) hist_lines : returns histogram of an image drawn as bins ( only for grayscale images )
Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
h = np.zeros((300,256,3))
if len(im.shape) == 2:
color = [(255,255,255)]
elif im.shape[2] == 3:
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
hist_item = cv.calcHist([im],[ch],None,[256],[0,256])
cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.int32(np.column_stack((bins,hist)))
cv.polylines(h,[pts],False,col)
y=np.flipud(h)
return y
def hist_lines(im):
h = np.zeros((300,256,3))
if len(im.shape)!=2:
print("hist_lines applicable only for grayscale images")
#print("so converting image to grayscale for representation"
im = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
hist_item = cv.calcHist([im],[0],None,[256],[0,256])
cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
for x,y in enumerate(hist):
cv.line(h,(x,0),(x,y),(255,255,255))
y = np.flipud(h)
return y
def main():
import sys
if len(sys.argv)>1:
fname = sys.argv[1]
else :
fname = 'lena.jpg'
print("usage : python hist.py <image_file>")
im = cv.imread(cv.samples.findFile(fname))
if im is None:
print('Failed to load image file:', fname)
sys.exit(1)
gray = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
print(''' Histogram plotting \n
Keymap :\n
a - show histogram for color image in curve mode \n
b - show histogram in bin mode \n
c - show equalized histogram (always in bin mode) \n
d - show histogram for grayscale image in curve mode \n
e - show histogram for a normalized image in bin mode \n
Esc - exit \n
''')
cv.imshow('image',im)
while True:
k = cv.waitKey(0)
if k == ord('a'):
curve = hist_curve(im)
cv.imshow('histogram',curve)
cv.imshow('image',im)
print('a')
elif k == ord('b'):
print('b')
lines = hist_lines(im)
cv.imshow('histogram',lines)
cv.imshow('image',gray)
elif k == ord('c'):
print('c')
equ = cv.equalizeHist(gray)
lines = hist_lines(equ)
cv.imshow('histogram',lines)
cv.imshow('image',equ)
elif k == ord('d'):
print('d')
curve = hist_curve(gray)
cv.imshow('histogram',curve)
cv.imshow('image',gray)
elif k == ord('e'):
print('e')
norm = cv.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv.NORM_MINMAX)
lines = hist_lines(norm)
cv.imshow('histogram',lines)
cv.imshow('image',norm)
elif k == 27:
print('ESC')
cv.destroyAllWindows()
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This program illustrates the use of findContours and drawContours.
The original image is put up along with the image of drawn contours.
Usage:
contours.py
A trackbar is put up which controls the contour level from -3 to 3
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
def make_image():
img = np.zeros((500, 500), np.uint8)
black, white = 0, 255
for i in xrange(6):
dx = int((i%2)*250 - 30)
dy = int((i/2.)*150)
if i == 0:
for j in xrange(11):
angle = (j+5)*np.pi/21
c, s = np.cos(angle), np.sin(angle)
x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
cv.line(img, (x1, y1), (x2, y2), white)
cv.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
return img
def main():
img = make_image()
h, w = img.shape[:2]
contours0, hierarchy = cv.findContours( img.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contours = [cv.approxPolyDP(cnt, 3, True) for cnt in contours0]
def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = levels - 3
cv.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
3, cv.LINE_AA, hierarchy, abs(levels) )
cv.imshow('contours', vis)
update(3)
cv.createTrackbar( "levels+3", "contours", 3, 7, update )
cv.imshow('image', img)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Digit recognition from video.
Run digits.py before, to train and save the SVM.
Usage:
digits_video.py [{camera_id|video_file}]
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import os
import sys
# local modules
import video
from common import mosaic
from digits import *
def main():
try:
src = sys.argv[1]
    except IndexError:
src = 0
cap = video.create_capture(src)
classifier_fn = 'digits_svm.dat'
if not os.path.exists(classifier_fn):
print('"%s" not found, run digits.py first' % classifier_fn)
return
model = cv.ml.SVM_load(classifier_fn)
while True:
_ret, frame = cap.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10)
bin = cv.medianBlur(bin, 3)
contours, heirs = cv.findContours( bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
try:
heirs = heirs[0]
        except (IndexError, TypeError):
heirs = []
for cnt, heir in zip(contours, heirs):
_, _, _, outer_i = heir
if outer_i >= 0:
continue
x, y, w, h = cv.boundingRect(cnt)
if not (16 <= h <= 64 and w <= 1.2*h):
continue
pad = max(h-w, 0)
x, w = x - (pad // 2), w + pad
cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
bin_roi = bin[y:,x:][:h,:w]
m = bin_roi != 0
if not 0.1 < m.mean() < 0.4:
continue
'''
gray_roi = gray[y:,x:][:h,:w]
v_in, v_out = gray_roi[m], gray_roi[~m]
if v_out.std() > 10.0:
continue
s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std())
cv.putText(frame, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
'''
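            # normalize the digit: scale it to the SZ x SZ training size and move its
            # centroid (computed from image moments) to the patch center before deskewing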
s = 1.5*float(h)/SZ
m = cv.moments(bin_roi)
c1 = np.float32([m['m10'], m['m01']]) / m['m00']
c0 = np.float32([SZ/2, SZ/2])
t = c1 - s*c0
A = np.zeros((2, 3), np.float32)
A[:,:2] = np.eye(2)*s
A[:,2] = t
bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
bin_norm = deskew(bin_norm)
if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]:
frame[y:,x+w:][:SZ, :SZ] = bin_norm[...,np.newaxis]
sample = preprocess_hog([bin_norm])
digit = model.predict(sample)[1].ravel()
cv.putText(frame, '%d'%digit, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
cv.imshow('frame', frame)
cv.imshow('bin', bin)
ch = cv.waitKey(1)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Robust line fitting.
==================
Example of using cv.fitLine function for fitting line
to points in presence of outliers.
Usage
-----
fitline.py
Switch through different M-estimator functions and see how well
the robust functions fit the line even in the case of ~50% outliers.
Keys
----
SPACE - generate random points
f - change distance function
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
# built-in modules
import itertools as it
# local modules
from common import draw_str
w, h = 512, 256
def toint(p):
return tuple(map(int, p))
def sample_line(p1, p2, n, noise=0.0):
p1 = np.float32(p1)
t = np.random.rand(n,1)
return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise
dist_func_names = it.cycle('DIST_L2 DIST_L1 DIST_L12 DIST_FAIR DIST_WELSCH DIST_HUBER'.split())
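# cv.fitLine distance types: DIST_L2 is plain least squares; the others are robust
# M-estimators that progressively down-weight outliers, e.g.
#   vx, vy, cx, cy = cv.fitLine(pts, cv.DIST_HUBER, 0, 0.01, 0.01)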
if PY3:
cur_func_name = next(dist_func_names)
else:
cur_func_name = dist_func_names.next()
def update(_=None):
noise = cv.getTrackbarPos('noise', 'fit line')
n = cv.getTrackbarPos('point n', 'fit line')
r = cv.getTrackbarPos('outlier %', 'fit line') / 100.0
outn = int(n*r)
p0, p1 = (90, 80), (w-90, h-80)
img = np.zeros((h, w, 3), np.uint8)
cv.line(img, toint(p0), toint(p1), (0, 255, 0))
if n > 0:
line_points = sample_line(p0, p1, n-outn, noise)
outliers = np.random.rand(outn, 2) * (w, h)
points = np.vstack([line_points, outliers])
for p in line_points:
cv.circle(img, toint(p), 2, (255, 255, 255), -1)
for p in outliers:
cv.circle(img, toint(p), 2, (64, 64, 255), -1)
func = getattr(cv, cur_func_name)
vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01)
cv.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))
draw_str(img, (20, 20), cur_func_name)
cv.imshow('fit line', img)
def main():
cv.namedWindow('fit line')
cv.createTrackbar('noise', 'fit line', 3, 50, update)
cv.createTrackbar('point n', 'fit line', 100, 500, update)
cv.createTrackbar('outlier %', 'fit line', 30, 100, update)
while True:
update()
ch = cv.waitKey(0)
if ch == ord('f'):
global cur_func_name
if PY3:
cur_func_name = next(dist_func_names)
else:
cur_func_name = dist_func_names.next()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for _i in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
points = np.float32( np.vstack(points) )
return points, ref_distrs
def draw_gaussain(img, mean, cov, color):
x, y = np.int32(mean)
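    # the SVD of the 2x2 covariance gives the principal axes: the singular values are
    # the variances along them, and the first column of u is the major-axis direction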
w, u, _vt = cv.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv.ellipse(img, (x, y), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA)
def main():
cluster_n = 5
img_size = 512
print('press any key to update distributions, ESC - exit\n')
while True:
print('sampling distributions...')
points, ref_distrs = make_gaussians(cluster_n, img_size)
print('EM (opencv) ...')
em = cv.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
found_distrs = zip(means, covs)
print('ready!\n')
img = np.zeros((img_size, img_size, 3), np.uint8)
for x, y in np.int32(points):
cv.circle(img, (x, y), 1, (255, 255, 255), -1)
for m, cov in ref_distrs:
draw_gaussain(img, m, cov, (0, 255, 0))
for m, cov in found_distrs:
draw_gaussain(img, m, cov, (0, 0, 255))
cv.imshow('gaussian mixture', img)
ch = cv.waitKey(0)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
VideoCapture sample showcasing some features of the Video4Linux2 backend
Sample shows how the VideoCapture class can be used to control parameters
of a webcam, such as focus or framerate.
The sample also shows how to access the raw images delivered
by the hardware, to get a grayscale image in a very efficient fashion.
Keys:
ESC - exit
g - toggle optimized grayscale conversion
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def main():
def decode_fourcc(v):
v = int(v)
return "".join([chr((v >> 8 * i) & 0xFF) for i in range(4)])
font = cv.FONT_HERSHEY_SIMPLEX
color = (0, 255, 0)
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474
cv.namedWindow("Video")
convert_rgb = True
fps = int(cap.get(cv.CAP_PROP_FPS))
    focus = int(min(cap.get(cv.CAP_PROP_FOCUS) * 100, 2**31-1))  # clamp focus to the C long range, since Python 3 ints are unbounded
cv.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv.CAP_PROP_FPS, v))
cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
while True:
_status, img = cap.read()
fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))
fps = cap.get(cv.CAP_PROP_FPS)
if not bool(cap.get(cv.CAP_PROP_CONVERT_RGB)):
if fourcc == "MJPG":
img = cv.imdecode(img, cv.IMREAD_GRAYSCALE)
elif fourcc == "YUYV":
img = cv.cvtColor(img, cv.COLOR_YUV2GRAY_YUYV)
else:
print("unsupported format")
break
cv.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0, color)
cv.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color)
cv.imshow("Video", img)
k = cv.waitKey(1)
if k == 27:
break
elif k == ord('g'):
convert_rgb = not convert_rgb
cap.set(cv.CAP_PROP_CONVERT_RGB, convert_rgb)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This sample demonstrates Canny edge detection.
Usage:
edge.py [<video source>]
Trackbars control edge thresholds.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
# relative module
import video
# built-in module
import sys
def main():
try:
fn = sys.argv[1]
    except IndexError:
fn = 0
def nothing(*arg):
pass
cv.namedWindow('edge')
cv.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
cv.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
cap = video.create_capture(fn)
while True:
_flag, img = cap.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
thrs1 = cv.getTrackbarPos('thrs1', 'edge')
thrs2 = cv.getTrackbarPos('thrs2', 'edge')
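        # with apertureSize=5 the Sobel gradient magnitudes are much larger,
        # hence the unusually large threshold range (up to 5000)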
edge = cv.Canny(gray, thrs1, thrs2, apertureSize=5)
vis = img.copy()
vis = np.uint8(vis/2.)
vis[edge != 0] = (0, 255, 0)
cv.imshow('edge', vis)
ch = cv.waitKey(5)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Texture flow direction estimation.
Sample shows how the cv.cornerEigenValsAndVecs function can be used
to estimate the image texture flow direction.
Usage:
texture_flow.py [<image>]
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def main():
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = 'starry_night.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
h, w = img.shape[:2]
eigen = cv.cornerEigenValsAndVecs(gray, 15, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
flow = eigen[:,:,2]
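    # v2, the eigenvector stored in the last pair, is taken as the local texture flow direction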
vis = img.copy()
vis[:] = (192 + np.uint32(vis)) / 2
d = 12
points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
for x, y in np.int32(points):
vx, vy = np.int32(flow[y, x]*d)
cv.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv.LINE_AA)
cv.imshow('input', img)
cv.imshow('flow', vis)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
The sample demonstrates how to train a Random Trees classifier
(or a Boosting classifier, an MLP, KNearest, or a Support Vector Machine) using the provided dataset.
We use the sample database letter-recognition.data
from UCI Repository, here is the link:
Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
UCI Repository of machine learning databases
[http://www.ics.uci.edu/~mlearn/MLRepository.html].
Irvine, CA: University of California, Department of Information and Computer Science.
The dataset consists of 20000 feature vectors along with the
responses - capital Latin letters A..Z.
The first 10000 samples are used for training
and the remaining 10000 - to test the classifier.
======================================================
USAGE:
letter_recog.py [--model <model>]
[--data <data fn>]
[--load <model fn>] [--save <model fn>]
Models: RTrees, KNearest, Boost, SVM, MLP
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def load_base(fn):
a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
samples, responses = a[:,1:], a[:,0]
return samples, responses
class LetterStatModel(object):
class_n = 26
train_ratio = 0.5
def load(self, fn):
self.model = self.model.load(fn)
def save(self, fn):
self.model.save(fn)
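    # Boost is a two-class classifier, so the 26-class problem is unrolled: every sample
    # is replicated once per class with the class id appended as an extra feature, and the
    # response is 1 only for the true class (see Boost.train below)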
def unroll_samples(self, samples):
sample_n, var_n = samples.shape
new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32)
new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0)
new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n)
return new_samples
def unroll_responses(self, responses):
sample_n = len(responses)
new_responses = np.zeros(sample_n*self.class_n, np.int32)
resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n )
new_responses[resp_idx] = 1
return new_responses
class RTrees(LetterStatModel):
def __init__(self):
self.model = cv.ml.RTrees_create()
def train(self, samples, responses):
self.model.setMaxDepth(20)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.ravel()
class KNearest(LetterStatModel):
def __init__(self):
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
return results.ravel()
class Boost(LetterStatModel):
def __init__(self):
self.model = cv.ml.Boost_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_samples = self.unroll_samples(samples)
new_responses = self.unroll_responses(responses)
var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
self.model.setWeakCount(15)
self.model.setMaxDepth(10)
self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
def predict(self, samples):
new_samples = self.unroll_samples(samples)
_ret, resp = self.model.predict(new_samples)
return resp.ravel().reshape(-1, self.class_n).argmax(1)
class SVM(LetterStatModel):
def __init__(self):
self.model = cv.ml.SVM_create()
def train(self, samples, responses):
self.model.setType(cv.ml.SVM_C_SVC)
self.model.setC(1)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setGamma(.1)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.ravel()
class MLP(LetterStatModel):
def __init__(self):
self.model = cv.ml.ANN_MLP_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
layer_sizes = np.int32([var_n, 100, 100, self.class_n])
self.model.setLayerSizes(layer_sizes)
self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
self.model.setBackpropMomentumScale(0.0)
self.model.setBackpropWeightScale(0.001)
self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.argmax(-1)
def main():
import getopt
import sys
models = [RTrees, KNearest, Boost, SVM, MLP] # NBayes
models = dict( [(cls.__name__.lower(), cls) for cls in models] )
args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])
args = dict(args)
args.setdefault('--model', 'svm')
args.setdefault('--data', 'letter-recognition.data')
datafile = cv.samples.findFile(args['--data'])
print('loading data %s ...' % datafile)
samples, responses = load_base(datafile)
Model = models[args['--model']]
model = Model()
train_n = int(len(samples)*model.train_ratio)
if '--load' in args:
fn = args['--load']
print('loading model from %s ...' % fn)
model.load(fn)
else:
print('training %s ...' % Model.__name__)
model.train(samples[:train_n], responses[:train_n])
print('testing...')
train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n].astype(int))
test_rate = np.mean(model.predict(samples[train_n:]) == responses[train_n:].astype(int))
print('train rate: %f test rate: %f' % (train_rate*100, test_rate*100))
if '--save' in args:
fn = args['--save']
print('saving model to %s ...' % fn)
model.save(fn)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
"""
Tracking of rotating point.
Rotation speed is constant.
Both the state and measurement vectors are 1D (a point angle).
The measurement is the real point angle + Gaussian noise.
The real and the estimated points are connected with a yellow line segment,
the real and the measured points are connected with a red line segment.
(if the Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
code = long(-1)
cv.namedWindow("Kalman")
while True:
state = 0.1 * np.random.randn(2, 1)
kalman.transitionMatrix = np.array([[1., 1.], [0., 1.]])
kalman.measurementMatrix = 1. * np.ones((1, 2))
kalman.processNoiseCov = 1e-5 * np.eye(2)
kalman.measurementNoiseCov = 1e-1 * np.ones((1, 1))
kalman.errorCovPost = 1. * np.ones((2, 2))
kalman.statePost = 0.1 * np.random.randn(2, 1)
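        # constant angular velocity model: state = (angle, angular speed); the
        # transitionMatrix [[1, 1], [0, 1]] advances the angle by the speed each step,
        # and the 1x2 measurementMatrix projects the state onto a scalar measurement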
while True:
            def calc_point(angle):
                return (np.around(img_width/2 + img_width/3*cos(angle), 0).astype(int),
                        np.around(img_height/2 - img_width/3*sin(angle), 0).astype(int))
state_angle = state[0, 0]
state_pt = calc_point(state_angle)
prediction = kalman.predict()
predict_angle = prediction[0, 0]
predict_pt = calc_point(predict_angle)
measurement = kalman.measurementNoiseCov * np.random.randn(1, 1)
# generate measurement
measurement = np.dot(kalman.measurementMatrix, state) + measurement
measurement_angle = measurement[0, 0]
measurement_pt = calc_point(measurement_angle)
# plot points
def draw_cross(center, color, d):
cv.line(img,
(center[0] - d, center[1] - d), (center[0] + d, center[1] + d),
color, 1, cv.LINE_AA, 0)
cv.line(img,
(center[0] + d, center[1] - d), (center[0] - d, center[1] + d),
color, 1, cv.LINE_AA, 0)
img = np.zeros((img_height, img_width, 3), np.uint8)
draw_cross(np.int32(state_pt), (255, 255, 255), 3)
draw_cross(np.int32(measurement_pt), (0, 0, 255), 3)
draw_cross(np.int32(predict_pt), (0, 255, 0), 3)
cv.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv.LINE_AA, 0)
cv.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv.LINE_AA, 0)
kalman.correct(measurement)
process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(2, 1)
state = np.dot(kalman.transitionMatrix, state) + process_noise
cv.imshow("Kalman", img)
code = cv.waitKey(100)
if code != -1:
break
if code in [27, ord('q'), ord('Q')]:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Example of optical flow estimation using DISOpticalFlow.
USAGE: dis_opt_flow.py [<video_source>]
Keys:
1 - toggle HSV flow visualization
2 - toggle glitch
3 - toggle spatial propagation of flow vectors
4 - toggle temporal propagation of flow vectors
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
cv.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (_x2, _y2) in lines:
cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
def draw_hsv(flow):
h, w = flow.shape[:2]
fx, fy = flow[:,:,0], flow[:,:,1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx*fx+fy*fy)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*(180/np.pi/2)
hsv[...,1] = 255
hsv[...,2] = np.minimum(v*4, 255)
bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
return bgr
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = -flow
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
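    # flow now holds absolute source coordinates per output pixel, so cv.remap
    # performs a backward warp of the image along the (negated) flow field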
res = cv.remap(img, flow, None, cv.INTER_LINEAR)
return res
def main():
import sys
print(__doc__)
try:
fn = sys.argv[1]
except IndexError:
fn = 0
cam = video.create_capture(fn)
_ret, prev = cam.read()
prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
show_hsv = False
show_glitch = False
use_spatial_propagation = False
use_temporal_propagation = True
cur_glitch = prev.copy()
inst = cv.DISOpticalFlow.create(cv.DISOPTICAL_FLOW_PRESET_MEDIUM)
inst.setUseSpatialPropagation(use_spatial_propagation)
flow = None
while True:
_ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
if flow is not None and use_temporal_propagation:
#warp previous flow to get an initial approximation for the current flow:
flow = inst.calc(prevgray, gray, warp_flow(flow,flow))
else:
flow = inst.calc(prevgray, gray, None)
prevgray = gray
cv.imshow('flow', draw_flow(gray, flow))
if show_hsv:
cv.imshow('flow HSV', draw_hsv(flow))
if show_glitch:
cur_glitch = warp_flow(cur_glitch, flow)
cv.imshow('glitch', cur_glitch)
ch = 0xFF & cv.waitKey(5)
if ch == 27:
break
if ch == ord('1'):
show_hsv = not show_hsv
print('HSV flow visualization is', ['off', 'on'][show_hsv])
if ch == ord('2'):
show_glitch = not show_glitch
if show_glitch:
cur_glitch = img.copy()
print('glitch is', ['off', 'on'][show_glitch])
if ch == ord('3'):
use_spatial_propagation = not use_spatial_propagation
inst.setUseSpatialPropagation(use_spatial_propagation)
print('spatial propagation is', ['off', 'on'][use_spatial_propagation])
if ch == ord('4'):
use_temporal_propagation = not use_temporal_propagation
print('temporal propagation is', ['off', 'on'][use_temporal_propagation])
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Example of dense optical flow estimation using the Farneback method.
USAGE: opt_flow.py [<video_source>]
Keys:
 1 - toggle HSV flow visualization
 2 - toggle glitch
 ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
cv.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (_x2, _y2) in lines:
cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
def draw_hsv(flow):
h, w = flow.shape[:2]
fx, fy = flow[:,:,0], flow[:,:,1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx*fx+fy*fy)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*(180/np.pi/2)
hsv[...,1] = 255
hsv[...,2] = np.minimum(v*4, 255)
bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
return bgr
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = -flow
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv.remap(img, flow, None, cv.INTER_LINEAR)
return res
def main():
import sys
try:
fn = sys.argv[1]
except IndexError:
fn = 0
cam = video.create_capture(fn)
_ret, prev = cam.read()
prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
show_hsv = False
show_glitch = False
cur_glitch = prev.copy()
while True:
_ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
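        # Farneback parameters: pyr_scale=0.5, levels=3, winsize=15,
        # iterations=3, poly_n=5, poly_sigma=1.2, flags=0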
flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
prevgray = gray
cv.imshow('flow', draw_flow(gray, flow))
if show_hsv:
cv.imshow('flow HSV', draw_hsv(flow))
if show_glitch:
cur_glitch = warp_flow(cur_glitch, flow)
cv.imshow('glitch', cur_glitch)
ch = cv.waitKey(5)
if ch == 27:
break
if ch == ord('1'):
show_hsv = not show_hsv
print('HSV flow visualization is', ['off', 'on'][show_hsv])
if ch == ord('2'):
show_glitch = not show_glitch
if show_glitch:
cur_glitch = img.copy()
print('glitch is', ['off', 'on'][show_glitch])
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/python
'''
This example illustrates how to use cv.HoughCircles() function.
Usage:
houghcircles.py [<image_name>]
image argument defaults to board.jpg
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def main():
try:
fn = sys.argv[1]
except IndexError:
fn = 'board.jpg'
src = cv.imread(cv.samples.findFile(fn))
img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
img = cv.medianBlur(img, 5)
    cimg = src.copy()
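    # HoughCircles arguments: dp=1 (accumulator at full resolution), minDist=10 between
    # centers, param1=100 (Canny high threshold), param2=30 (accumulator threshold),
    # minRadius=1, maxRadius=30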
circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
if circles is not None: # Check if circles have been found and only then iterate over these and add them to the image
_a, b, _c = circles.shape
        for i in range(b):
            cx, cy, r = np.int32(np.around(circles[0][i]))
            cv.circle(cimg, (cx, cy), r, (0, 0, 255), 3, cv.LINE_AA)
            cv.circle(cimg, (cx, cy), 2, (0, 255, 0), 3, cv.LINE_AA)  # draw center of circle
cv.imshow("detected circles", cimg)
cv.imshow("source", src)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
gabor_threads.py
=========
Sample demonstrates:
- use of multiple Gabor filter convolutions to get Fractalius-like image effect (http://www.redfieldplugins.com/filterFractalius.htm)
- use of python threading to accelerate the computation
Usage
-----
gabor_threads.py [image filename]
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
def build_filters():
filters = []
ksize = 31
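    # getGaborKernel parameters: sigma=4.0 (Gaussian envelope), theta (orientation),
    # lambd=10.0 (wavelength), gamma=0.5 (aspect ratio), psi=0 (phase offset)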
for theta in np.arange(0, np.pi, np.pi / 16):
kern = cv.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def process(img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv.filter2D(img, cv.CV_8UC3, kern)
np.maximum(accum, fimg, accum)
return accum
def process_threaded(img, filters, threadn = 8):
accum = np.zeros_like(img)
def f(kern):
return cv.filter2D(img, cv.CV_8UC3, kern)
pool = ThreadPool(processes=threadn)
for fimg in pool.imap_unordered(f, filters):
np.maximum(accum, fimg, accum)
return accum
def main():
import sys
from common import Timer
try:
img_fn = sys.argv[1]
    except IndexError:
img_fn = 'baboon.jpg'
img = cv.imread(cv.samples.findFile(img_fn))
if img is None:
print('Failed to load image file:', img_fn)
sys.exit(1)
filters = build_filters()
with Timer('running single-threaded'):
res1 = process(img, filters)
with Timer('running multi-threaded'):
res2 = process_threaded(img, filters)
print('res1 == res2: ', (res1 == res2).all())
cv.imshow('img', img)
cv.imshow('result', res2)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Wiener deconvolution.
Sample shows how DFT can be used to perform Wiener deconvolution [1]
of an image with a user-defined point spread function (PSF)
Usage:
deconvolution.py [--circle]
[--angle <degrees>]
[--d <diameter>]
[--snr <signal/noise ratio in db>]
[<input image>]
Use the sliders to adjust the PSF parameters.
Keys:
SPACE - switch between linear and circular PSF
ESC - exit
Examples:
deconvolution.py --angle 135 --d 22 licenseplate_motion.jpg
(image source: http://www.topazlabs.com/infocus/_images/licenseplate_compare.jpg)
deconvolution.py --angle 86 --d 31 text_motion.jpg
deconvolution.py --circle --d 19 text_defocus.jpg
(image source: compact digital photo camera, no artificial distortion)
[1] http://en.wikipedia.org/wiki/Wiener_deconvolution
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# local module
from common import nothing
def blur_edge(img, d=31):
h, w = img.shape[:2]
img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP)
img_blur = cv.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d]
y, x = np.indices((h, w))
dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1)
w = np.minimum(np.float32(dist)/d, 1.0)
return img*w + img_blur*(1-w)
def motion_kernel(angle, d, sz=65):
kern = np.ones((1, d), np.float32)
c, s = np.cos(angle), np.sin(angle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = sz // 2
A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((d-1)*0.5, 0))
kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC)
return kern
def defocus_kernel(d, sz=65):
kern = np.zeros((sz, sz), np.uint8)
cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1)
kern = np.float32(kern) / 255.0
return kern
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['circle', 'angle=', 'd=', 'snr='])
opts = dict(opts)
try:
fn = args[0]
    except IndexError:
fn = 'licenseplate_motion.jpg'
win = 'deconvolution'
img = cv.imread(cv.samples.findFile(fn), cv.IMREAD_GRAYSCALE)
if img is None:
print('Failed to load file:', fn)
sys.exit(1)
img = np.float32(img)/255.0
cv.imshow('input', img)
img = blur_edge(img)
IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
defocus = '--circle' in opts
def update(_):
ang = np.deg2rad( cv.getTrackbarPos('angle', win) )
d = cv.getTrackbarPos('d', win)
noise = 10**(-0.1*cv.getTrackbarPos('SNR (db)', win))
if defocus:
psf = defocus_kernel(d)
else:
psf = motion_kernel(ang, d)
cv.imshow('psf', psf)
psf /= psf.sum()
psf_pad = np.zeros_like(img)
kh, kw = psf.shape
psf_pad[:kh, :kw] = psf
PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows = kh)
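        # PSF2 = |H|^2 (squared magnitude of the spectrum); the Wiener filter divides by
        # the PSF spectrum regularized by the noise-to-signal ratio: H / (|H|^2 + NSR)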
PSF2 = (PSF**2).sum(-1)
iPSF = PSF / (PSF2 + noise)[...,np.newaxis]
RES = cv.mulSpectrums(IMG, iPSF, 0)
res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT )
res = np.roll(res, -kh//2, 0)
res = np.roll(res, -kw//2, 1)
cv.imshow(win, res)
cv.namedWindow(win)
cv.namedWindow('psf', 0)
cv.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update)
cv.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update)
cv.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update)
update(None)
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord(' '):
defocus = not defocus
update(None)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Coherence-enhancing filtering example
=====================================
inspired by
Joachim Weickert "Coherence-Enhancing Shock Filters"
http://www.mia.uni-saarland.de/Publications/weickert-dagm03.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
h, w = img.shape[:2]
for i in xrange(iter_n):
print(i)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
x, y = eigen[:,:,1,0], eigen[:,:,1,1]
gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma)
gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma)
gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma)
gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
m = gvv < 0
ero = cv.erode(img, None)
dil = cv.dilate(img, None)
img1 = ero
img1[m] = dil[m]
img = np.uint8(img*(1.0 - blend) + img1*blend)
print('done')
return img
def main():
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = 'baboon.jpg'
src = cv.imread(cv.samples.findFile(fn))
def nothing(*argv):
pass
def update():
sigma = cv.getTrackbarPos('sigma', 'control')*2+1
str_sigma = cv.getTrackbarPos('str_sigma', 'control')*2+1
blend = cv.getTrackbarPos('blend', 'control') / 10.0
print('sigma: %d str_sigma: %d blend_coef: %f' % (sigma, str_sigma, blend))
dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend)
cv.imshow('dst', dst)
cv.namedWindow('control', 0)
cv.createTrackbar('sigma', 'control', 9, 15, nothing)
cv.createTrackbar('blend', 'control', 7, 10, nothing)
cv.createTrackbar('str_sigma', 'control', 9, 15, nothing)
print('Press SPACE to update the image\n')
cv.imshow('src', src)
update()
while True:
ch = cv.waitKey()
if ch == ord(' '):
update()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Utility for measuring python opencv API coverage by samples.
'''
# Python 2/3 compatibility
from __future__ import print_function
from glob import glob
import cv2 as cv
import re
if __name__ == '__main__':
cv2_callable = set(['cv.'+name for name in dir(cv) if callable( getattr(cv, name) )])
found = set()
for fn in glob('*.py'):
print(' --- ', fn)
code = open(fn).read()
        found |= set(re.findall(r'cv2?\.\w+', code))
cv2_used = found & cv2_callable
cv2_unused = cv2_callable - cv2_used
with open('unused_api.txt', 'w') as f:
f.write('\n'.join(sorted(cv2_unused)))
r = 1.0 * len(cv2_used) / len(cv2_callable)
print('\ncv api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 ))
|
#!/usr/bin/env python
'''
mouse_and_match.py [-i path | --input path: default ../data/]
Demonstrate using a mouse to interact with an image:
Read in the images in a directory one by one
Allow the user to select parts of an image with a mouse
When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.
SPACE for next image
ESC to exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import os
import sys
import glob
import argparse
class App():
drag_start = None
sel = (0,0,0,0)
def onmouse(self, event, x, y, flags, param):
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = x, y
self.sel = (0,0,0,0)
elif event == cv.EVENT_LBUTTONUP:
if self.sel[2] > self.sel[0] and self.sel[3] > self.sel[1]:
patch = self.gray[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]]
result = cv.matchTemplate(self.gray, patch, cv.TM_CCOEFF_NORMED)
result = np.abs(result)**3
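            # TM_CCOEFF_NORMED yields values in [-1, 1]; cubing the magnitude sharpens the peaks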
_val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
result8 = cv.normalize(result, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
cv.imshow("result", result8)
self.drag_start = None
elif self.drag_start:
#print flags
if flags & cv.EVENT_FLAG_LBUTTON:
minpos = min(self.drag_start[0], x), min(self.drag_start[1], y)
maxpos = max(self.drag_start[0], x), max(self.drag_start[1], y)
self.sel = (minpos[0], minpos[1], maxpos[0], maxpos[1])
img = cv.cvtColor(self.gray, cv.COLOR_GRAY2BGR)
cv.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0,255,255), 1)
cv.imshow("gray", img)
else:
print("selection is complete")
self.drag_start = None
def run(self):
parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
parser.add_argument("-i","--input", default='../data/', help="Input directory.")
args = parser.parse_args()
path = args.input
cv.namedWindow("gray",1)
cv.setMouseCallback("gray", self.onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
ext = os.path.splitext(infile)[1][1:] #get the filename extension
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)
img = cv.imread(infile,1)
if img is None:
continue
self.sel = (0,0,0,0)
self.drag_start = None
self.gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray", self.gray)
if cv.waitKey() == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Digit recognition adjustment.
Grid search is used to find the best parameters for SVM and KNearest classifiers.
SVM adjustment follows the guidelines given in
http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
Usage:
digits_adjust.py [--model {svm|knearest}]
--model {svm|knearest} - select the classifier (SVM is the default)
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
from digits import *
def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
n = len(samples)
folds = np.array_split(np.arange(n), kfold)
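    # k-fold cross-validation: each fold serves once as the test set while the
    # remaining folds are used for training; the mean error over the folds is returned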
def f(i):
model = model_class(**params)
test_idx = folds[i]
train_idx = list(folds)
train_idx.pop(i)
train_idx = np.hstack(train_idx)
train_samples, train_labels = samples[train_idx], labels[train_idx]
test_samples, test_labels = samples[test_idx], labels[test_idx]
model.train(train_samples, train_labels)
resp = model.predict(test_samples)
score = (resp != test_labels).mean()
print(".", end='')
return score
if pool is None:
scores = list(map(f, xrange(kfold)))
else:
scores = pool.map(f, xrange(kfold))
return np.mean(scores)
class App(object):
def __init__(self):
self._samples, self._labels = self.preprocess()
def preprocess(self):
digits, labels = load_digits(DIGITS_FN)
shuffle = np.random.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
return samples, labels
def get_dataset(self):
return self._samples, self._labels
def run_jobs(self, f, jobs):
pool = ThreadPool(processes=cv.getNumberOfCPUs())
ires = pool.imap_unordered(f, jobs)
return ires
def adjust_SVM(self):
Cs = np.logspace(0, 10, 15, base=2)
gammas = np.logspace(-7, 4, 15, base=2)
scores = np.zeros((len(Cs), len(gammas)))
scores[:] = np.nan
print('adjusting SVM (may take a long time) ...')
def f(job):
i, j = job
samples, labels = self.get_dataset()
params = dict(C = Cs[i], gamma=gammas[j])
score = cross_validate(SVM, params, samples, labels)
return i, j, score
ires = self.run_jobs(f, np.ndindex(*scores.shape))
for count, (i, j, score) in enumerate(ires):
scores[i, j] = score
print('%d / %d (best error: %.2f %%, last: %.2f %%)' %
(count+1, scores.size, np.nanmin(scores)*100, score*100))
print(scores)
print('writing score table to "svm_scores.npz"')
np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas)
i, j = np.unravel_index(scores.argmin(), scores.shape)
best_params = dict(C = Cs[i], gamma=gammas[j])
print('best params:', best_params)
print('best error: %.2f %%' % (scores.min()*100))
return best_params
def adjust_KNearest(self):
print('adjusting KNearest ...')
def f(k):
samples, labels = self.get_dataset()
err = cross_validate(KNearest, dict(k=k), samples, labels)
return k, err
best_err, best_k = np.inf, -1
for k, err in self.run_jobs(f, xrange(1, 9)):
if err < best_err:
best_err, best_k = err, k
print('k = %d, error: %.2f %%' % (k, err*100))
best_params = dict(k=best_k)
print('best params:', best_params, 'err: %.2f' % (best_err*100))
return best_params
if __name__ == '__main__':
import getopt
import sys
print(__doc__)
args, _ = getopt.getopt(sys.argv[1:], '', ['model='])
args = dict(args)
args.setdefault('--model', 'svm')
args.setdefault('--env', '')
if args['--model'] not in ['svm', 'knearest']:
print('unknown model "%s"' % args['--model'])
sys.exit(1)
t = clock()
app = App()
if args['--model'] == 'knearest':
app.adjust_KNearest()
else:
app.adjust_SVM()
print('work time: %f s' % (clock() - t))
|
#!/usr/bin/env python
'''
MSER detector demo
==================
Usage:
------
mser.py [<video source>]
Keys:
-----
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
import sys
def main():
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
cam = video.create_capture(video_src)
mser = cv.MSER_create()
while True:
ret, img = cam.read()
if ret == 0:
break
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
vis = img.copy()
regions, _ = mser.detectRegions(gray)
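        # detectRegions returns the MSER regions as point arrays (the bounding boxes are ignored here)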
hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv.polylines(vis, hulls, 1, (0, 255, 0))
cv.imshow('img', vis)
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Feature homography
==================
Example of using the features2d framework for interactive video homography matching.
ORB features and a FLANN matcher are used. The actual tracking is implemented by
the PlaneTracker class in plane_tracker.py
Inspired by http://www.youtube.com/watch?v=-ZNYoL8rzPY
video: http://www.youtube.com/watch?v=FirtmYcC0Vc
Usage
-----
feature_homography.py [<video source>]
Keys:
SPACE - pause video
Select a textured planar object to track by drawing a box with a mouse.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# local modules
import video
from video import presets
import common
from common import getsize, draw_keypoints
from plane_tracker import PlaneTracker
class App:
def __init__(self, src):
self.cap = video.create_capture(src, presets['book'])
self.frame = None
self.paused = False
self.tracker = PlaneTracker()
cv.namedWindow('plane')
self.rect_sel = common.RectSelector('plane', self.on_rect)
def on_rect(self, rect):
self.tracker.clear()
self.tracker.add_target(self.frame, rect)
def run(self):
while True:
playing = not self.paused and not self.rect_sel.dragging
if playing or self.frame is None:
ret, frame = self.cap.read()
if not ret:
break
self.frame = frame.copy()
w, h = getsize(self.frame)
vis = np.zeros((h, w*2, 3), np.uint8)
vis[:h,:w] = self.frame
if len(self.tracker.targets) > 0:
target = self.tracker.targets[0]
vis[:,w:] = target.image
draw_keypoints(vis[:,w:], target.keypoints)
x0, y0, x1, y1 = target.rect
cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)
if playing:
tracked = self.tracker.track(self.frame)
if len(tracked) > 0:
tracked = tracked[0]
cv.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
cv.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
draw_keypoints(vis, self.tracker.frame_points)
self.rect_sel.draw(vis)
cv.imshow('plane', vis)
ch = cv.waitKey(1)
if ch == ord(' '):
self.paused = not self.paused
if ch == 27:
break
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
|
#!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
Usage
-----
lk_track.py [<video_source>]
Keys
----
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
from common import anorm2, draw_str
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
class App:
def __init__(self, video_src):
self.track_len = 10
self.detect_interval = 5
self.tracks = []
self.cam = video.create_capture(video_src)
self.frame_idx = 0
def run(self):
while True:
_ret, frame = self.cam.read()
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
vis = frame.copy()
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
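                # forward-backward check: re-track the new points back to the previous frame
                # and keep only those that return within 1 pixel of where they started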
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
                    cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
self.tracks = new_tracks
cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
if self.frame_idx % self.detect_interval == 0:
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv.circle(mask, (x, y), 5, 0, -1)
p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([(x, y)])
self.frame_idx += 1
self.prev_gray = frame_gray
cv.imshow('lk_track', vis)
ch = cv.waitKey(1)
if ch == 27:
break
def main():
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This program demonstrates OpenCV drawing and text output functions by drawing different shapes and text strings.
Usage:
python3 drawing.py
Press any key to exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# Drawing Lines
def lines():
for i in range(NUMBER*2):
pt1, pt2 = [], []
pt1.append(np.random.randint(x1, x2))
pt1.append(np.random.randint(y1, y2))
pt2.append(np.random.randint(x1, x2))
pt2.append(np.random.randint(y1, y2))
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
arrowed = np.random.randint(0, 6)
if (arrowed<3):
cv.line(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType)
else:
cv.arrowedLine(image, tuple(pt1), tuple(pt2), color, np.random.randint(1, 10), lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY)>=0:
return
# Drawing Rectangle
def rectangle():
for i in range(NUMBER*2):
pt1, pt2 = [], []
pt1.append(np.random.randint(x1, x2))
pt1.append(np.random.randint(y1, y2))
pt2.append(np.random.randint(x1, x2))
pt2.append(np.random.randint(y1, y2))
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
thickness = np.random.randint(-3, 10)
marker = np.random.randint(0, 10)
marker_size = np.random.randint(30, 80)
if (marker > 5):
cv.rectangle(image, tuple(pt1), tuple(pt2), color, max(thickness, -1), lineType)
else:
cv.drawMarker(image, tuple(pt1), color, marker, marker_size)
cv.imshow(wndname, image)
if cv.waitKey(DELAY)>=0:
return
# Drawing ellipse
def ellipse():
for i in range(NUMBER*2):
center = []
center.append(np.random.randint(x1, x2))
        center.append(np.random.randint(y1, y2))
axes = []
axes.append(np.random.randint(0, 200))
axes.append(np.random.randint(0, 200))
angle = np.random.randint(0, 180)
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
thickness = np.random.randint(-1, 9)
cv.ellipse(image, tuple(center), tuple(axes), angle, angle-100, angle + 200, color, thickness, lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY)>=0:
return
# Drawing Polygonal Curves
def polygonal():
for i in range(NUMBER):
        pt = np.zeros((2, 3, 2), dtype=np.int32)
pt[0][0][0] = np.random.randint(x1, x2)
pt[0][0][1] = np.random.randint(y1, y2)
pt[0][1][0] = np.random.randint(x1, x2)
pt[0][1][1] = np.random.randint(y1, y2)
pt[0][2][0] = np.random.randint(x1, x2)
pt[0][2][1] = np.random.randint(y1, y2)
pt[1][0][0] = np.random.randint(x1, x2)
pt[1][0][1] = np.random.randint(y1, y2)
pt[1][1][0] = np.random.randint(x1, x2)
pt[1][1][1] = np.random.randint(y1, y2)
pt[1][2][0] = np.random.randint(x1, x2)
pt[1][2][1] = np.random.randint(y1, y2)
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
alist = []
for k in pt[0]:
alist.append(k)
for k in pt[1]:
alist.append(k)
ppt = np.array(alist)
cv.polylines(image, [ppt], True, color, thickness = np.random.randint(1, 10), lineType = lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY) >= 0:
return
# fills an area bounded by several polygonal contours
def fill():
for i in range(NUMBER):
        pt = np.zeros((2, 3, 2), dtype=np.int32)
pt[0][0][0] = np.random.randint(x1, x2)
pt[0][0][1] = np.random.randint(y1, y2)
pt[0][1][0] = np.random.randint(x1, x2)
pt[0][1][1] = np.random.randint(y1, y2)
pt[0][2][0] = np.random.randint(x1, x2)
pt[0][2][1] = np.random.randint(y1, y2)
pt[1][0][0] = np.random.randint(x1, x2)
pt[1][0][1] = np.random.randint(y1, y2)
pt[1][1][0] = np.random.randint(x1, x2)
pt[1][1][1] = np.random.randint(y1, y2)
pt[1][2][0] = np.random.randint(x1, x2)
pt[1][2][1] = np.random.randint(y1, y2)
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
alist = []
for k in pt[0]:
alist.append(k)
for k in pt[1]:
alist.append(k)
ppt = np.array(alist)
cv.fillPoly(image, [ppt], color, lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY) >= 0:
return
# Drawing Circles
def circles():
for i in range(NUMBER):
center = []
center.append(np.random.randint(x1, x2))
        center.append(np.random.randint(y1, y2))
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
cv.circle(image, tuple(center), np.random.randint(0, 300), color, np.random.randint(-1, 9), lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY) >= 0:
return
# Draws a text string
def string():
for i in range(NUMBER):
org = []
org.append(np.random.randint(x1, x2))
        org.append(np.random.randint(y1, y2))
color = "%06x" % np.random.randint(0, 0xFFFFFF)
color = tuple(int(color[i:i+2], 16) for i in (0, 2 ,4))
cv.putText(image, "Testing text rendering", tuple(org), np.random.randint(0, 8), np.random.randint(0, 100)*0.05+0.1, color, np.random.randint(1, 10), lineType)
cv.imshow(wndname, image)
if cv.waitKey(DELAY) >= 0:
return
def string1():
textsize = cv.getTextSize("OpenCV forever!", cv.FONT_HERSHEY_COMPLEX, 3, 5)
org = (int((width - textsize[0][0])/2), int((height - textsize[0][1])/2))
for i in range(0, 255, 2):
image2 = np.array(image) - i
cv.putText(image2, "OpenCV forever!", org, cv.FONT_HERSHEY_COMPLEX, 3, (i, i, 255), 5, lineType)
cv.imshow(wndname, image2)
if cv.waitKey(DELAY) >= 0:
return
if __name__ == '__main__':
print(__doc__)
wndname = "Drawing Demo"
NUMBER = 100
DELAY = 5
width, height = 1000, 700
lineType = cv.LINE_AA # change it to LINE_8 to see non-antialiased graphics
    x1, x2, y1, y2 = -width//2, width*3//2, -height//2, height*3//2
image = np.zeros((height, width, 3), dtype = np.uint8)
cv.imshow(wndname, image)
cv.waitKey(DELAY)
lines()
rectangle()
ellipse()
polygonal()
fill()
circles()
string()
string1()
cv.waitKey(0)
    cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Multitarget planar tracking
==================
Example of using the features2d framework for interactive video homography matching.
ORB features and a FLANN matcher are used. This sample provides the PlaneTracker class
and an example of its usage.
video: http://www.youtube.com/watch?v=pzVbhxx6aog
Usage
-----
plane_tracker.py [<video source>]
Keys:
SPACE - pause video
c - clear targets
Select a textured planar object to track by drawing a box with a mouse.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
# built-in modules
from collections import namedtuple
# local modules
import video
import common
from video import presets
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple('PlanarTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
quad - target boundary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv.ORB_create( nfeatures = 1000 )
self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''
x0, y0, x1, y1 = rect
raw_points, raw_descrs = self.detect_features(image)
points, descs = [], []
for kp, desc in zip(raw_points, raw_descrs):
x, y = kp.pt
if x0 <= x <= x1 and y0 <= y <= y1:
points.append(kp)
descs.append(desc)
descs = np.uint8(descs)
self.matcher.add([descs])
target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data)
self.targets.append(target)
def clear(self):
'''Remove all targets'''
self.targets = []
self.matcher.clear()
def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
self.frame_points, frame_descrs = self.detect_features(frame)
if len(self.frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
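        # Lowe's ratio test: keep a match only when its best distance is under 0.75 of
        # the second-best, which filters out ambiguous descriptor matches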
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
if len(matches) < MIN_MATCH_COUNT:
return []
matches_by_id = [[] for _ in xrange(len(self.targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < MIN_MATCH_COUNT:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
p0, p1 = p0[status], p1[status]
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.p0), reverse=True)
return tracked
def detect_features(self, frame):
'''detect_features(self, frame) -> keypoints, descrs'''
keypoints, descrs = self.detector.detectAndCompute(frame, None)
        if descrs is None:  # detectAndCompute returns descrs=None if no keypoints were found
descrs = []
return keypoints, descrs
class App:
def __init__(self, src):
self.cap = video.create_capture(src, presets['book'])
self.frame = None
self.paused = False
self.tracker = PlaneTracker()
cv.namedWindow('plane')
self.rect_sel = common.RectSelector('plane', self.on_rect)
def on_rect(self, rect):
self.tracker.add_target(self.frame, rect)
def run(self):
while True:
playing = not self.paused and not self.rect_sel.dragging
if playing or self.frame is None:
ret, frame = self.cap.read()
if not ret:
break
self.frame = frame.copy()
vis = self.frame.copy()
if playing:
tracked = self.tracker.track(self.frame)
for tr in tracked:
cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
for (x, y) in np.int32(tr.p1):
cv.circle(vis, (x, y), 2, (255, 255, 255))
self.rect_sel.draw(vis)
cv.imshow('plane', vis)
ch = cv.waitKey(1)
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.tracker.clear()
if ch == 27:
break
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
|
#!/usr/bin/env python
'''
Camshift tracker
================
This is a demo that shows mean-shift based tracking.
You select a colored object, such as your face, and it tracks it.
This reads from the video camera (0 by default, or the camera number the user enters)
[1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.7673
Usage:
------
camshift.py [<video source>]
To initialize tracking, select the object with mouse
Keys:
-----
ESC - exit
b - toggle back-projected probability visualization
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
# local module
import video
from video import presets
class App(object):
def __init__(self, video_src):
self.cam = video.create_capture(video_src, presets['cube'])
_ret, self.frame = self.cam.read()
cv.namedWindow('camshift')
cv.setMouseCallback('camshift', self.onmouse)
self.selection = None
self.drag_start = None
self.show_backproj = False
self.track_window = None
def onmouse(self, event, x, y, flags, param):
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
self.track_window = None
if self.drag_start:
xmin = min(x, self.drag_start[0])
ymin = min(y, self.drag_start[1])
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax, ymax)
        if event == cv.EVENT_LBUTTONUP:
            self.drag_start = None
            if self.selection is not None:
                xmin, ymin, xmax, ymax = self.selection
                self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv.cvtColor(img, cv.COLOR_HSV2BGR)
cv.imshow('hist', img)
def run(self):
while True:
_ret, self.frame = self.cam.read()
vis = self.frame.copy()
hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, x1, y1 = self.selection
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
self.hist = hist.reshape(-1)
self.show_hist()
vis_roi = vis[y0:y1, x0:x1]
cv.bitwise_not(vis_roi, vis_roi)
vis[mask == 0] = 0
if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
self.selection = None
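                # back projection gives a per-pixel likelihood of belonging to the tracked
                # hue histogram; CamShift then shifts and resizes the window towards it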
prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)
if self.show_backproj:
vis[:] = prob[...,np.newaxis]
try:
cv.ellipse(vis, track_box, (0, 0, 255), 2)
                except Exception:
print(track_box)
cv.imshow('camshift', vis)
ch = cv.waitKey(5)
if ch == 27:
break
if ch == ord('b'):
self.show_backproj = not self.show_backproj
cv.destroyAllWindows()
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
|
#!/usr/bin/env python
'''
Scans the current directory for *.py files and reports
the ones with a missing __doc__ string.
'''
# Python 2/3 compatibility
from __future__ import print_function
from glob import glob
if __name__ == '__main__':
print('--- undocumented files:')
for fn in glob('*.py'):
loc = {}
try:
try:
execfile(fn, loc) # Python 2
except NameError:
exec(open(fn).read(), loc) # Python 3
except Exception:
pass
if '__doc__' not in loc:
print(fn)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from numpy import linspace
def inverse_homogeneoux_matrix(M):
R = M[0:3, 0:3]
T = M[0:3, 3]
M_inv = np.identity(4)
M_inv[0:3, 0:3] = R.T
M_inv[0:3, 3] = -(R.T).dot(T)
return M_inv
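
# Illustrative check (assumption: M is a rigid transform [R|t; 0 0 0 1]):
# the closed-form inverse above satisfies M_inv.dot(M) == identity.
def _check_inverse(M):
    return np.allclose(inverse_homogeneoux_matrix(M).dot(M), np.identity(4))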
def transform_to_matplotlib_frame(cMo, X, inverse=False):
M = np.identity(4)
M[1,1] = 0
M[1,2] = 1
M[2,1] = -1
M[2,2] = 0
if inverse:
return M.dot(inverse_homogeneoux_matrix(cMo).dot(X))
else:
return M.dot(cMo.dot(X))
def create_camera_model(camera_matrix, width, height, scale_focal, draw_frame_axis=False):
fx = camera_matrix[0,0]
fy = camera_matrix[1,1]
focal = 2 / (fx + fy)
f_scale = scale_focal * focal
# draw image plane
X_img_plane = np.ones((4,5))
X_img_plane[0:3,0] = [-width, height, f_scale]
X_img_plane[0:3,1] = [width, height, f_scale]
X_img_plane[0:3,2] = [width, -height, f_scale]
X_img_plane[0:3,3] = [-width, -height, f_scale]
X_img_plane[0:3,4] = [-width, height, f_scale]
# draw triangle above the image plane
X_triangle = np.ones((4,3))
X_triangle[0:3,0] = [-width, -height, f_scale]
X_triangle[0:3,1] = [0, -2*height, f_scale]
X_triangle[0:3,2] = [width, -height, f_scale]
# draw camera
X_center1 = np.ones((4,2))
X_center1[0:3,0] = [0, 0, 0]
X_center1[0:3,1] = [-width, height, f_scale]
X_center2 = np.ones((4,2))
X_center2[0:3,0] = [0, 0, 0]
X_center2[0:3,1] = [width, height, f_scale]
X_center3 = np.ones((4,2))
X_center3[0:3,0] = [0, 0, 0]
X_center3[0:3,1] = [width, -height, f_scale]
X_center4 = np.ones((4,2))
X_center4[0:3,0] = [0, 0, 0]
X_center4[0:3,1] = [-width, -height, f_scale]
# draw camera frame axis
X_frame1 = np.ones((4,2))
X_frame1[0:3,0] = [0, 0, 0]
X_frame1[0:3,1] = [f_scale/2, 0, 0]
X_frame2 = np.ones((4,2))
X_frame2[0:3,0] = [0, 0, 0]
X_frame2[0:3,1] = [0, f_scale/2, 0]
X_frame3 = np.ones((4,2))
X_frame3[0:3,0] = [0, 0, 0]
X_frame3[0:3,1] = [0, 0, f_scale/2]
if draw_frame_axis:
return [X_img_plane, X_triangle, X_center1, X_center2, X_center3, X_center4, X_frame1, X_frame2, X_frame3]
else:
return [X_img_plane, X_triangle, X_center1, X_center2, X_center3, X_center4]
def create_board_model(extrinsics, board_width, board_height, square_size, draw_frame_axis=False):
width = board_width*square_size
height = board_height*square_size
# draw calibration board
X_board = np.ones((4,5))
#X_board_cam = np.ones((extrinsics.shape[0],4,5))
X_board[0:3,0] = [0,0,0]
X_board[0:3,1] = [width,0,0]
X_board[0:3,2] = [width,height,0]
X_board[0:3,3] = [0,height,0]
X_board[0:3,4] = [0,0,0]
# draw board frame axis
X_frame1 = np.ones((4,2))
X_frame1[0:3,0] = [0, 0, 0]
X_frame1[0:3,1] = [height/2, 0, 0]
X_frame2 = np.ones((4,2))
X_frame2[0:3,0] = [0, 0, 0]
X_frame2[0:3,1] = [0, height/2, 0]
X_frame3 = np.ones((4,2))
X_frame3[0:3,0] = [0, 0, 0]
X_frame3[0:3,1] = [0, 0, height/2]
if draw_frame_axis:
return [X_board, X_frame1, X_frame2, X_frame3]
else:
return [X_board]
def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
extrinsics, board_width, board_height, square_size,
patternCentric):
from matplotlib import cm
    min_values = np.inf   # scalar bounds; np.minimum/np.maximum broadcast below
    max_values = -np.inf
if patternCentric:
X_moving = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal)
X_static = create_board_model(extrinsics, board_width, board_height, square_size)
else:
X_static = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal, True)
X_moving = create_board_model(extrinsics, board_width, board_height, square_size)
cm_subsection = linspace(0.0, 1.0, extrinsics.shape[0])
colors = [ cm.jet(x) for x in cm_subsection ]
for i in range(len(X_static)):
X = np.zeros(X_static[i].shape)
for j in range(X_static[i].shape[1]):
X[:,j] = transform_to_matplotlib_frame(np.eye(4), X_static[i][:,j])
ax.plot3D(X[0,:], X[1,:], X[2,:], color='r')
min_values = np.minimum(min_values, X[0:3,:].min(1))
max_values = np.maximum(max_values, X[0:3,:].max(1))
for idx in range(extrinsics.shape[0]):
R, _ = cv.Rodrigues(extrinsics[idx,0:3])
cMo = np.eye(4,4)
cMo[0:3,0:3] = R
cMo[0:3,3] = extrinsics[idx,3:6]
for i in range(len(X_moving)):
X = np.zeros(X_moving[i].shape)
for j in range(X_moving[i].shape[1]):
X[0:4,j] = transform_to_matplotlib_frame(cMo, X_moving[i][0:4,j], patternCentric)
ax.plot3D(X[0,:], X[1,:], X[2,:], color=colors[idx])
min_values = np.minimum(min_values, X[0:3,:].min(1))
max_values = np.maximum(max_values, X[0:3,:].max(1))
return min_values, max_values
def main():
import argparse
parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--calibration', type=str, default='left_intrinsics.yml',
help='YAML camera calibration file.')
parser.add_argument('--cam_width', type=float, default=0.064/2,
help='Width/2 of the displayed camera.')
parser.add_argument('--cam_height', type=float, default=0.048/2,
help='Height/2 of the displayed camera.')
parser.add_argument('--scale_focal', type=float, default=40,
help='Value to scale the focal length.')
parser.add_argument('--patternCentric', action='store_true',
help='The calibration board is static and the camera is moving.')
args = parser.parse_args()
fs = cv.FileStorage(cv.samples.findFile(args.calibration), cv.FILE_STORAGE_READ)
board_width = int(fs.getNode('board_width').real())
board_height = int(fs.getNode('board_height').real())
square_size = fs.getNode('square_size').real()
camera_matrix = fs.getNode('camera_matrix').mat()
extrinsics = fs.getNode('extrinsic_parameters').mat()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-variable
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib
ax.set_aspect("equal")
cam_width = args.cam_width
cam_height = args.cam_height
scale_focal = args.scale_focal
min_values, max_values = draw_camera_boards(ax, camera_matrix, cam_width, cam_height,
scale_focal, extrinsics, board_width,
board_height, square_size, args.patternCentric)
X_min = min_values[0]
X_max = max_values[0]
Y_min = min_values[1]
Y_max = max_values[1]
Z_min = min_values[2]
Z_max = max_values[2]
max_range = np.array([X_max-X_min, Y_max-Y_min, Z_max-Z_min]).max() / 2.0
mid_x = (X_max+X_min) * 0.5
mid_y = (Y_max+Y_min) * 0.5
mid_z = (Z_max+Z_min) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('x')
ax.set_ylabel('z')
ax.set_zlabel('-y')
ax.set_title('Extrinsic Parameters Visualization')
plt.show()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Planar augmented reality
========================
This sample shows an example of an augmented reality overlay over a planar
object tracked by PlaneTracker from plane_tracker.py. The solvePnP function
is used to estimate the tracked object's location in 3D space.
video: http://www.youtube.com/watch?v=pzVbhxx6aog
Usage
-----
plane_ar.py [<video source>]
Keys:
SPACE - pause video
c - clear targets
Select a textured planar object to track by drawing a box with a mouse.
Use the 'focal' slider to adjust the camera focal length for proper video augmentation.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
import common
from plane_tracker import PlaneTracker
from video import presets
# Simple model of a house - cube with a triangular prism "roof"
ar_verts = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1],
[0, 0.5, 2], [1, 0.5, 2]])
ar_edges = [(0, 1), (1, 2), (2, 3), (3, 0),
(4, 5), (5, 6), (6, 7), (7, 4),
(0, 4), (1, 5), (2, 6), (3, 7),
(4, 8), (5, 8), (6, 9), (7, 9), (8, 9)]
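
# --- Illustrative sketch (the real work happens in draw_overlay below) ---
# solvePnP recovers the camera pose from 3D<->2D correspondences; assuming
# `quad_3d` holds the 4 target corners in object coordinates, `quad_2d` their
# tracked image positions and `K` the pinhole camera matrix:
def _pose_sketch(quad_3d, quad_2d, K):
    dist_coef = np.zeros(4)  # assume an undistorted camera
    _ret, rvec, tvec = cv.solvePnP(quad_3d, quad_2d, K, dist_coef)
    return rvec, tvec  # feed these to cv.projectPoints to render 3D vertices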
class App:
def __init__(self, src):
self.cap = video.create_capture(src, presets['book'])
self.frame = None
self.paused = False
self.tracker = PlaneTracker()
cv.namedWindow('plane')
cv.createTrackbar('focal', 'plane', 25, 50, common.nothing)
self.rect_sel = common.RectSelector('plane', self.on_rect)
def on_rect(self, rect):
self.tracker.add_target(self.frame, rect)
def run(self):
while True:
playing = not self.paused and not self.rect_sel.dragging
if playing or self.frame is None:
ret, frame = self.cap.read()
if not ret:
break
self.frame = frame.copy()
vis = self.frame.copy()
if playing:
tracked = self.tracker.track(self.frame)
for tr in tracked:
cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
for (x, y) in np.int32(tr.p1):
cv.circle(vis, (x, y), 2, (255, 255, 255))
self.draw_overlay(vis, tr)
self.rect_sel.draw(vis)
cv.imshow('plane', vis)
ch = cv.waitKey(1)
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.tracker.clear()
if ch == 27:
break
def draw_overlay(self, vis, tracked):
x0, y0, x1, y1 = tracked.target.rect
quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
fx = 0.5 + cv.getTrackbarPos('focal', 'plane') / 50.0
h, w = vis.shape[:2]
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0, 0.0, 1.0]])
dist_coef = np.zeros(4)
_ret, rvec, tvec = cv.solvePnP(quad_3d, tracked.quad, K, dist_coef)
verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
verts = cv.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
for i, j in ar_edges:
(x0, y0), (x1, y1) = verts[i], verts[j]
cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
|
#!/usr/bin/env python
'''
K-means clustering sample.
Usage:
kmeans.py
Keyboard shortcuts:
ESC - exit
space - generate new distribution
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from gaussian_mix import make_gaussians
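
# --- Illustrative sketch (not used by main below) ---
# cv.kmeans expects float32 samples; it returns compactness, per-sample
# labels and the cluster centers. Flag 0 below matches cv.KMEANS_RANDOM_CENTERS.
def _kmeans_sketch(points, cluster_n):
    term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
    _compactness, labels, centers = cv.kmeans(np.float32(points), cluster_n,
                                              None, term_crit, 10, 0)
    return labels, centers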
def main():
cluster_n = 5
img_size = 512
# generating bright palette
colors = np.zeros((1, cluster_n, 3), np.uint8)
colors[0,:] = 255
colors[0,:,0] = np.arange(0, 180, 180.0/cluster_n)
colors = cv.cvtColor(colors, cv.COLOR_HSV2BGR)[0]
while True:
print('sampling distributions...')
points, _ = make_gaussians(cluster_n, img_size)
term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
_ret, labels, _centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
img = np.zeros((img_size, img_size, 3), np.uint8)
for (x, y), label in zip(np.int32(points), labels.ravel()):
c = list(map(int, colors[label]))
cv.circle(img, (x, y), 1, c, -1)
cv.imshow('kmeans', img)
ch = cv.waitKey(0)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Morphology operations.
Usage:
morphology.py [<image>]
Keys:
1 - change operation
2 - change structure element shape
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
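
# --- Illustrative sketch (not used by main below) ---
# A single morphology call: build a structuring element, then apply an
# operation such as opening (erosion followed by dilation) via morphologyEx.
def _morphology_sketch(img, ksize=5, iters=1):
    st = cv.getStructuringElement(cv.MORPH_ELLIPSE, (ksize, ksize))
    return cv.morphologyEx(img, cv.MORPH_OPEN, st, iterations=iters)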
def main():
import sys
from itertools import cycle
from common import draw_str
try:
fn = sys.argv[1]
    except IndexError:
fn = 'baboon.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
cv.imshow('original', img)
modes = cycle(['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient'])
str_modes = cycle(['ellipse', 'rect', 'cross'])
if PY3:
cur_mode = next(modes)
cur_str_mode = next(str_modes)
else:
cur_mode = modes.next()
cur_str_mode = str_modes.next()
def update(dummy=None):
sz = cv.getTrackbarPos('op/size', 'morphology')
iters = cv.getTrackbarPos('iters', 'morphology')
opers = cur_mode.split('/')
if len(opers) > 1:
sz = sz - 10
op = opers[sz > 0]
sz = abs(sz)
else:
op = opers[0]
sz = sz*2+1
str_name = 'MORPH_' + cur_str_mode.upper()
oper_name = 'MORPH_' + op.upper()
st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz))
res = cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters)
draw_str(res, (10, 20), 'mode: ' + cur_mode)
draw_str(res, (10, 40), 'operation: ' + oper_name)
draw_str(res, (10, 60), 'structure: ' + str_name)
draw_str(res, (10, 80), 'ksize: %d iters: %d' % (sz, iters))
cv.imshow('morphology', res)
cv.namedWindow('morphology')
cv.createTrackbar('op/size', 'morphology', 12, 20, update)
cv.createTrackbar('iters', 'morphology', 1, 10, update)
update()
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord('1'):
if PY3:
cur_mode = next(modes)
else:
cur_mode = modes.next()
if ch == ord('2'):
if PY3:
cur_str_mode = next(str_modes)
else:
cur_str_mode = str_modes.next()
update()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Multithreaded video processing sample.
Usage:
video_threaded.py {<video device number>|<video file name>}
Shows how Python threading capabilities can be used
to organize a parallel pipeline for processing captured frames,
giving smoother playback.
Keyboard shortcuts:
ESC - exit
space - switch between multi and single threaded processing
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
from collections import deque
from common import clock, draw_str, StatValue
import video
class DummyTask:
def __init__(self, data):
self.data = data
def ready(self):
return True
def get(self):
return self.data
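
# --- Illustrative sketch of the pipeline pattern used by main below ---
# Finished results are drained from the left of the deque in submission
# order, then the new frame is submitted without blocking the capture loop.
def _pipeline_step(pool, pending, process_frame, frame, t):
    while len(pending) > 0 and pending[0].ready():
        _res, _t0 = pending.popleft().get()
    pending.append(pool.apply_async(process_frame, (frame.copy(), t)))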
def main():
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = 0
cap = video.create_capture(fn)
def process_frame(frame, t0):
# some intensive computation...
frame = cv.medianBlur(frame, 19)
frame = cv.medianBlur(frame, 19)
return frame, t0
threadn = cv.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
pending = deque()
threaded_mode = True
latency = StatValue()
frame_interval = StatValue()
last_frame_time = clock()
while True:
while len(pending) > 0 and pending[0].ready():
res, t0 = pending.popleft().get()
latency.update(clock() - t0)
draw_str(res, (20, 20), "threaded : " + str(threaded_mode))
draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value*1000))
draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value*1000))
cv.imshow('threaded video', res)
if len(pending) < threadn:
_ret, frame = cap.read()
t = clock()
frame_interval.update(t - last_frame_time)
last_frame_time = t
if threaded_mode:
task = pool.apply_async(process_frame, (frame.copy(), t))
else:
task = DummyTask(process_frame(frame, t))
pending.append(task)
ch = cv.waitKey(1)
if ch == ord(' '):
threaded_mode = not threaded_mode
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Watershed segmentation
======================
This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().
Usage
-----
watershed.py [image filename]
Keys
----
1-7 - switch marker color
SPACE - update segmentation
r - reset
a - toggle autoupdate
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from common import Sketcher
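
# --- Illustrative sketch (the App class below wires this to mouse input) ---
# cv.watershed floods from user-seeded int32 markers: unlabeled pixels (0)
# receive the label of the seed that reaches them first, boundaries become -1.
def _watershed_sketch(img, markers):
    m = markers.copy()  # watershed modifies its marker image in place
    cv.watershed(img, m)
    return m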
class App:
def __init__(self, fn):
self.img = cv.imread(fn)
if self.img is None:
raise Exception('Failed to load image file: %s' % fn)
h, w = self.img.shape[:2]
self.markers = np.zeros((h, w), np.int32)
self.markers_vis = self.img.copy()
self.cur_marker = 1
self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255
self.auto_update = True
self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
def get_colors(self):
return list(map(int, self.colors[self.cur_marker])), self.cur_marker
def watershed(self):
m = self.markers.copy()
cv.watershed(self.img, m)
overlay = self.colors[np.maximum(m, 0)]
vis = cv.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv.CV_8UC3)
cv.imshow('watershed', vis)
def run(self):
while cv.getWindowProperty('img', 0) != -1 or cv.getWindowProperty('watershed', 0) != -1:
ch = cv.waitKey(50)
if ch == 27:
break
if ch >= ord('1') and ch <= ord('7'):
self.cur_marker = ch - ord('0')
print('marker: ', self.cur_marker)
if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
self.watershed()
self.sketch.dirty = False
if ch in [ord('a'), ord('A')]:
self.auto_update = not self.auto_update
                print('auto_update is', ['off', 'on'][self.auto_update])
if ch in [ord('r'), ord('R')]:
self.markers[:] = 0
self.markers_vis[:] = self.img
self.sketch.show()
cv.destroyAllWindows()
if __name__ == '__main__':
print(__doc__)
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = 'fruits.jpg'
App(cv.samples.findFile(fn)).run()
|
#!/usr/bin/env python
'''
Example of detecting upright people in images using HOG features.
Usage:
peopledetect.py <image_names>
Press any key to continue, ESC to stop.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
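
# --- Illustrative sketch (main below does the same, plus nesting filtering) ---
# The detector is a pre-trained linear SVM over HOG features, slid over the
# image at multiple scales:
def _hog_sketch(img):
    hog = cv.HOGDescriptor()
    hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
    found, _weights = hog.detectMultiScale(img, winStride=(8, 8),
                                           padding=(32, 32), scale=1.05)
    return found  # (x, y, w, h) rectangles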
def inside(r, q):
rx, ry, rw, rh = r
qx, qy, qw, qh = q
return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
def draw_detections(img, rects, thickness = 1):
for x, y, w, h in rects:
# the HOG detector returns slightly larger rectangles than the real objects.
# so we slightly shrink the rectangles to get a nicer output.
pad_w, pad_h = int(0.15*w), int(0.05*h)
cv.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
def main():
import sys
from glob import glob
import itertools as it
hog = cv.HOGDescriptor()
hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() )
default = [cv.samples.findFile('basketball2.png')] if len(sys.argv[1:]) == 0 else []
for fn in it.chain(*map(glob, default + sys.argv[1:])):
        print(fn, ' - ', end='')
try:
img = cv.imread(fn)
if img is None:
print('Failed to load image file:', fn)
continue
        except Exception:
print('loading error')
continue
found, _w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
found_filtered = []
for ri, r in enumerate(found):
for qi, q in enumerate(found):
if ri != qi and inside(r, q):
break
else:
found_filtered.append(r)
draw_detections(img, found)
draw_detections(img, found_filtered, 3)
print('%d (%d) found' % (len(found_filtered), len(found)))
cv.imshow('img', img)
ch = cv.waitKey()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Feature-based image matching sample.
Note that you will need the https://github.com/opencv/opencv_contrib repo for SIFT and SURF
USAGE
find_obj.py [--feature=<sift|surf|orb|akaze|brisk>[-flann]] [ <image1> <image2> ]
--feature - Feature to use. Can be sift, surf, orb, akaze or brisk. Append '-flann'
to the feature name to use a FLANN-based matcher instead of brute-force.
Press left mouse button on a feature point to see its matching point.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from common import anorm, getsize
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
def init_feature(name):
chunks = name.split('-')
if chunks[0] == 'sift':
detector = cv.xfeatures2d.SIFT_create()
norm = cv.NORM_L2
elif chunks[0] == 'surf':
detector = cv.xfeatures2d.SURF_create(800)
norm = cv.NORM_L2
elif chunks[0] == 'orb':
detector = cv.ORB_create(400)
norm = cv.NORM_HAMMING
elif chunks[0] == 'akaze':
detector = cv.AKAZE_create()
norm = cv.NORM_HAMMING
elif chunks[0] == 'brisk':
detector = cv.BRISK_create()
norm = cv.NORM_HAMMING
else:
return None, None
if 'flann' in chunks:
if norm == cv.NORM_L2:
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else:
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
else:
matcher = cv.BFMatcher(norm)
return detector, matcher
def filter_matches(kp1, kp2, matches, ratio = 0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, list(kp_pairs)
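
# filter_matches above is Lowe's ratio test: a match survives only if its
# best distance is clearly smaller (default 0.75x) than the second-best,
# which discards ambiguous correspondences. Typical usage:
#     raw_matches = matcher.knnMatch(desc1, desc2, k=2)
#     p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)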
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1+w2] = img2
vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR)
if H is not None:
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
cv.polylines(vis, [corners], True, (255, 255, 255))
if status is None:
status = np.ones(len(kp_pairs), np.bool_)
p1, p2 = [], [] # python 2 / python 3 change of zip unpacking
for kpp in kp_pairs:
p1.append(np.int32(kpp[0].pt))
p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))
green = (0, 255, 0)
red = (0, 0, 255)
kp_color = (51, 103, 236)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
col = green
cv.circle(vis, (x1, y1), 2, col, -1)
cv.circle(vis, (x2, y2), 2, col, -1)
else:
col = red
r = 2
thickness = 3
cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy()
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
cv.line(vis, (x1, y1), (x2, y2), green)
cv.imshow(win, vis)
def onmouse(event, x, y, flags, param):
cur_vis = vis
if flags & cv.EVENT_FLAG_LBUTTON:
cur_vis = vis0.copy()
r = 8
m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r)
idxs = np.where(m)[0]
kp1s, kp2s = [], []
for i in idxs:
(x1, y1), (x2, y2) = p1[i], p2[i]
col = (red, green)[status[i][0]]
cv.line(cur_vis, (x1, y1), (x2, y2), col)
kp1, kp2 = kp_pairs[i]
kp1s.append(kp1)
kp2s.append(kp2)
cur_vis = cv.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color)
cur_vis[:,w1:] = cv.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color)
cv.imshow(win, cur_vis)
cv.setMouseCallback(win, onmouse)
return vis
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
opts = dict(opts)
feature_name = opts.get('--feature', 'brisk')
try:
fn1, fn2 = args
    except ValueError:
fn1 = 'box.png'
fn2 = 'box_in_scene.png'
img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
detector, matcher = init_feature(feature_name)
if img1 is None:
print('Failed to load fn1:', fn1)
sys.exit(1)
if img2 is None:
print('Failed to load fn2:', fn2)
sys.exit(1)
if detector is None:
print('unknown feature:', feature_name)
sys.exit(1)
print('using', feature_name)
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
def match_and_draw(win):
print('matching...')
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
_vis = explore_match(win, img1, img2, kp_pairs, status, H)
match_and_draw('find_obj')
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
from functools import reduce
import numpy as np
import cv2 as cv
# built-in modules
import os
import itertools as it
from contextlib import contextmanager
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
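
# homotrans applies a 3x3 projective transform in homogeneous coordinates
# and dehomogenizes, e.g. homotrans(np.eye(3), 2.0, 3.0) == (2.0, 3.0).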
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
def nothing(*arg, **kw):
pass
def clock():
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
    print(msg, '...', end='')
start = clock()
try:
yield
finally:
print("%.2f ms" % ((clock()-start)*1000))
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
return
if self.drag_start:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
if PY3:
output = it.zip_longest(fillvalue=fillvalue, *args)
else:
output = it.izip_longest(fillvalue=fillvalue, *args)
return output
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
if PY3:
img0 = next(imgs)
else:
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv.circle(vis, (int(x), int(y)), 2, color)
|
"""
Stitching sample (advanced)
===========================
Show how to use Stitcher API from python.
"""
# Python 2/3 compatibility
from __future__ import print_function
import argparse
from collections import OrderedDict
import cv2 as cv
import numpy as np
EXPOS_COMP_CHOICES = OrderedDict()
EXPOS_COMP_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS
EXPOS_COMP_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN
EXPOS_COMP_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS
EXPOS_COMP_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
EXPOS_COMP_CHOICES['no'] = cv.detail.ExposureCompensator_NO
BA_COST_CHOICES = OrderedDict()
BA_COST_CHOICES['ray'] = cv.detail_BundleAdjusterRay
BA_COST_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj
BA_COST_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial
BA_COST_CHOICES['no'] = cv.detail_NoBundleAdjuster
FEATURES_FIND_CHOICES = OrderedDict()
try:
FEATURES_FIND_CHOICES['surf'] = cv.xfeatures2d_SURF.create
except AttributeError:
print("SURF not available")
# if SURF not available, ORB is default
FEATURES_FIND_CHOICES['orb'] = cv.ORB.create
try:
FEATURES_FIND_CHOICES['sift'] = cv.xfeatures2d_SIFT.create
except AttributeError:
print("SIFT not available")
try:
FEATURES_FIND_CHOICES['brisk'] = cv.BRISK_create
except AttributeError:
print("BRISK not available")
try:
FEATURES_FIND_CHOICES['akaze'] = cv.AKAZE_create
except AttributeError:
print("AKAZE not available")
SEAM_FIND_CHOICES = OrderedDict()
SEAM_FIND_CHOICES['gc_color'] = cv.detail_GraphCutSeamFinder('COST_COLOR')
SEAM_FIND_CHOICES['gc_colorgrad'] = cv.detail_GraphCutSeamFinder('COST_COLOR_GRAD')
SEAM_FIND_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
SEAM_FIND_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
SEAM_FIND_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
SEAM_FIND_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
ESTIMATOR_CHOICES = OrderedDict()
ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator
ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator
WARP_CHOICES = (
'spherical',
'plane',
'affine',
'cylindrical',
'fisheye',
'stereographic',
'compressedPlaneA2B1',
'compressedPlaneA1.5B1',
'compressedPlanePortraitA2B1',
'compressedPlanePortraitA1.5B1',
'paniniA2B1',
'paniniA1.5B1',
'paniniPortraitA2B1',
'paniniPortraitA1.5B1',
'mercator',
'transverseMercator',
)
WAVE_CORRECT_CHOICES = ('horiz', 'no', 'vert',)
BLEND_CHOICES = ('multiband', 'feather', 'no',)
parser = argparse.ArgumentParser(
prog="stitching_detailed.py", description="Rotation model images stitcher"
)
parser.add_argument(
'img_names', nargs='+',
help="Files to stitch", type=str
)
parser.add_argument(
'--try_cuda',
action='store',
default=False,
help="Try to use CUDA. The default value is no. All default values are for CPU mode.",
    type=lambda v: str(v).lower() in ('true', '1', 'yes'), dest='try_cuda'  # plain type=bool treats any non-empty string as True
)
parser.add_argument(
'--work_megapix', action='store', default=0.6,
help="Resolution for image registration step. The default is 0.6 Mpx",
type=float, dest='work_megapix'
)
parser.add_argument(
'--features', action='store', default=list(FEATURES_FIND_CHOICES.keys())[0],
help="Type of features used for images matching. The default is '%s'." % FEATURES_FIND_CHOICES.keys(),
choices=FEATURES_FIND_CHOICES.keys(),
type=str, dest='features'
)
parser.add_argument(
'--matcher', action='store', default='homography',
help="Matcher used for pairwise image matching.",
choices=('homography', 'affine'),
type=str, dest='matcher'
)
parser.add_argument(
'--estimator', action='store', default=list(ESTIMATOR_CHOICES.keys())[0],
help="Type of estimator used for transformation estimation.",
choices=ESTIMATOR_CHOICES.keys(),
type=str, dest='estimator'
)
parser.add_argument(
'--match_conf', action='store',
help="Confidence for feature matching step. The default is 0.3 for ORB and 0.65 for other feature types.",
type=float, dest='match_conf'
)
parser.add_argument(
'--conf_thresh', action='store', default=1.0,
help="Threshold for two images are from the same panorama confidence.The default is 1.0.",
type=float, dest='conf_thresh'
)
parser.add_argument(
'--ba', action='store', default=list(BA_COST_CHOICES.keys())[0],
help="Bundle adjustment cost function. The default is '%s'." % list(BA_COST_CHOICES.keys())[0],
choices=BA_COST_CHOICES.keys(),
type=str, dest='ba'
)
parser.add_argument(
'--ba_refine_mask', action='store', default='xxxxx',
help="Set refinement mask for bundle adjustment. It looks like 'x_xxx', "
"where 'x' means refine respective parameter and '_' means don't refine, "
"and has the following format:<fx><skew><ppx><aspect><ppy>. "
"The default mask is 'xxxxx'. "
"If bundle adjustment doesn't support estimation of selected parameter then "
"the respective flag is ignored.",
type=str, dest='ba_refine_mask'
)
parser.add_argument(
'--wave_correct', action='store', default=WAVE_CORRECT_CHOICES[0],
help="Perform wave effect correction. The default is '%s'" % WAVE_CORRECT_CHOICES[0],
choices=WAVE_CORRECT_CHOICES,
type=str, dest='wave_correct'
)
parser.add_argument(
'--save_graph', action='store', default=None,
help="Save matches graph represented in DOT language to <file_name> file.",
type=str, dest='save_graph'
)
parser.add_argument(
'--warp', action='store', default=WARP_CHOICES[0],
help="Warp surface type. The default is '%s'." % WARP_CHOICES[0],
choices=WARP_CHOICES,
type=str, dest='warp'
)
parser.add_argument(
'--seam_megapix', action='store', default=0.1,
help="Resolution for seam estimation step. The default is 0.1 Mpx.",
type=float, dest='seam_megapix'
)
parser.add_argument(
'--seam', action='store', default=list(SEAM_FIND_CHOICES.keys())[0],
help="Seam estimation method. The default is '%s'." % list(SEAM_FIND_CHOICES.keys())[0],
choices=SEAM_FIND_CHOICES.keys(),
type=str, dest='seam'
)
parser.add_argument(
'--compose_megapix', action='store', default=-1,
help="Resolution for compositing step. Use -1 for original resolution. The default is -1",
type=float, dest='compose_megapix'
)
parser.add_argument(
'--expos_comp', action='store', default=list(EXPOS_COMP_CHOICES.keys())[0],
help="Exposure compensation method. The default is '%s'." % list(EXPOS_COMP_CHOICES.keys())[0],
choices=EXPOS_COMP_CHOICES.keys(),
type=str, dest='expos_comp'
)
parser.add_argument(
'--expos_comp_nr_feeds', action='store', default=1,
help="Number of exposure compensation feed.",
type=np.int32, dest='expos_comp_nr_feeds'
)
parser.add_argument(
'--expos_comp_nr_filtering', action='store', default=2,
help="Number of filtering iterations of the exposure compensation gains.",
type=float, dest='expos_comp_nr_filtering'
)
parser.add_argument(
'--expos_comp_block_size', action='store', default=32,
help="BLock size in pixels used by the exposure compensator. The default is 32.",
type=np.int32, dest='expos_comp_block_size'
)
parser.add_argument(
'--blend', action='store', default=BLEND_CHOICES[0],
help="Blending method. The default is '%s'." % BLEND_CHOICES[0],
choices=BLEND_CHOICES,
type=str, dest='blend'
)
parser.add_argument(
'--blend_strength', action='store', default=5,
help="Blending strength from [0,100] range. The default is 5",
type=np.int32, dest='blend_strength'
)
parser.add_argument(
'--output', action='store', default='result.jpg',
help="The default is 'result.jpg'",
type=str, dest='output'
)
parser.add_argument(
'--timelapse', action='store', default=None,
help="Output warped images separately as frames of a time lapse movie, "
"with 'fixed_' prepended to input file names.",
type=str, dest='timelapse'
)
parser.add_argument(
'--rangewidth', action='store', default=-1,
help="uses range_width to limit number of images to match with.",
type=int, dest='rangewidth'
)
__doc__ += '\n' + parser.format_help()
def get_matcher(args):
try_cuda = args.try_cuda
matcher_type = args.matcher
if args.match_conf is None:
if args.features == 'orb':
match_conf = 0.3
else:
match_conf = 0.65
else:
match_conf = args.match_conf
range_width = args.rangewidth
if matcher_type == "affine":
matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
elif range_width == -1:
matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
else:
matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
return matcher
def get_compensator(args):
expos_comp_type = EXPOS_COMP_CHOICES[args.expos_comp]
expos_comp_nr_feeds = args.expos_comp_nr_feeds
expos_comp_block_size = args.expos_comp_block_size
# expos_comp_nr_filtering = args.expos_comp_nr_filtering
if expos_comp_type == cv.detail.ExposureCompensator_CHANNELS:
compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
# compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
elif expos_comp_type == cv.detail.ExposureCompensator_CHANNELS_BLOCKS:
compensator = cv.detail_BlocksChannelsCompensator(
expos_comp_block_size, expos_comp_block_size,
expos_comp_nr_feeds
)
# compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
else:
compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
return compensator
def main():
args = parser.parse_args()
img_names = args.img_names
print(img_names)
work_megapix = args.work_megapix
seam_megapix = args.seam_megapix
compose_megapix = args.compose_megapix
conf_thresh = args.conf_thresh
ba_refine_mask = args.ba_refine_mask
wave_correct = args.wave_correct
if wave_correct == 'no':
do_wave_correct = False
else:
do_wave_correct = True
if args.save_graph is None:
save_graph = False
else:
save_graph = True
warp_type = args.warp
blend_type = args.blend
blend_strength = args.blend_strength
result_name = args.output
if args.timelapse is not None:
timelapse = True
if args.timelapse == "as_is":
timelapse_type = cv.detail.Timelapser_AS_IS
elif args.timelapse == "crop":
timelapse_type = cv.detail.Timelapser_CROP
else:
print("Bad timelapse method")
exit()
else:
timelapse = False
finder = FEATURES_FIND_CHOICES[args.features]()
seam_work_aspect = 1
full_img_sizes = []
features = []
images = []
is_work_scale_set = False
is_seam_scale_set = False
is_compose_scale_set = False
for name in img_names:
full_img = cv.imread(cv.samples.findFile(name))
if full_img is None:
print("Cannot read image ", name)
exit()
full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
if work_megapix < 0:
img = full_img
work_scale = 1
is_work_scale_set = True
else:
if is_work_scale_set is False:
work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
is_work_scale_set = True
img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
if is_seam_scale_set is False:
seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
seam_work_aspect = seam_scale / work_scale
is_seam_scale_set = True
img_feat = cv.detail.computeImageFeatures2(finder, img)
features.append(img_feat)
img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
images.append(img)
matcher = get_matcher(args)
p = matcher.apply2(features)
matcher.collectGarbage()
if save_graph:
with open(args.save_graph, 'w') as fh:
fh.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
    indices = cv.detail.leaveBiggestComponent(features, p, conf_thresh)
img_subset = []
img_names_subset = []
full_img_sizes_subset = []
for i in range(len(indices)):
img_names_subset.append(img_names[indices[i, 0]])
img_subset.append(images[indices[i, 0]])
full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
images = img_subset
img_names = img_names_subset
full_img_sizes = full_img_sizes_subset
num_images = len(img_names)
if num_images < 2:
print("Need more images")
exit()
estimator = ESTIMATOR_CHOICES[args.estimator]()
b, cameras = estimator.apply(features, p, None)
if not b:
print("Homography estimation failed.")
exit()
for cam in cameras:
cam.R = cam.R.astype(np.float32)
adjuster = BA_COST_CHOICES[args.ba]()
adjuster.setConfThresh(1)
refine_mask = np.zeros((3, 3), np.uint8)
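    # each character of the 5-char mask enables refinement of one intrinsic:
    # <fx><skew><ppx><aspect><ppy>, stored in the upper triangle of K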
if ba_refine_mask[0] == 'x':
refine_mask[0, 0] = 1
if ba_refine_mask[1] == 'x':
refine_mask[0, 1] = 1
if ba_refine_mask[2] == 'x':
refine_mask[0, 2] = 1
if ba_refine_mask[3] == 'x':
refine_mask[1, 1] = 1
if ba_refine_mask[4] == 'x':
refine_mask[1, 2] = 1
adjuster.setRefinementMask(refine_mask)
b, cameras = adjuster.apply(features, p, cameras)
if not b:
print("Camera parameters adjusting failed.")
exit()
focals = []
for cam in cameras:
focals.append(cam.focal)
    focals.sort()  # sort in place; a bare sorted() call discards its result
if len(focals) % 2 == 1:
warped_image_scale = focals[len(focals) // 2]
else:
warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
if do_wave_correct:
rmats = []
for cam in cameras:
rmats.append(np.copy(cam.R))
rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_HORIZ)
for idx, cam in enumerate(cameras):
cam.R = rmats[idx]
corners = []
masks_warped = []
images_warped = []
sizes = []
masks = []
for i in range(0, num_images):
um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
masks.append(um)
warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect) # warper could be nullptr?
for idx in range(0, num_images):
K = cameras[idx].K().astype(np.float32)
swa = seam_work_aspect
K[0, 0] *= swa
K[0, 2] *= swa
K[1, 1] *= swa
K[1, 2] *= swa
corner, image_wp = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
corners.append(corner)
sizes.append((image_wp.shape[1], image_wp.shape[0]))
images_warped.append(image_wp)
p, mask_wp = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
masks_warped.append(mask_wp.get())
images_warped_f = []
for img in images_warped:
imgf = img.astype(np.float32)
images_warped_f.append(imgf)
compensator = get_compensator(args)
compensator.feed(corners=corners, images=images_warped, masks=masks_warped)
seam_finder = SEAM_FIND_CHOICES[args.seam]
seam_finder.find(images_warped_f, corners, masks_warped)
compose_scale = 1
corners = []
sizes = []
blender = None
timelapser = None
# https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
for idx, name in enumerate(img_names):
full_img = cv.imread(name)
if not is_compose_scale_set:
if compose_megapix > 0:
compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
is_compose_scale_set = True
compose_work_aspect = compose_scale / work_scale
warped_image_scale *= compose_work_aspect
warper = cv.PyRotationWarper(warp_type, warped_image_scale)
for i in range(0, len(img_names)):
cameras[i].focal *= compose_work_aspect
cameras[i].ppx *= compose_work_aspect
cameras[i].ppy *= compose_work_aspect
                sz = (int(round(full_img_sizes[i][0] * compose_scale)),
                      int(round(full_img_sizes[i][1] * compose_scale)))
K = cameras[i].K().astype(np.float32)
roi = warper.warpRoi(sz, K, cameras[i].R)
corners.append(roi[0:2])
sizes.append(roi[2:4])
if abs(compose_scale - 1) > 1e-1:
img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
interpolation=cv.INTER_LINEAR_EXACT)
else:
img = full_img
_img_size = (img.shape[1], img.shape[0])
K = cameras[idx].K().astype(np.float32)
corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
compensator.apply(idx, corners[idx], image_warped, mask_warped)
image_warped_s = image_warped.astype(np.int16)
dilated_mask = cv.dilate(masks_warped[idx], None)
seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT)
mask_warped = cv.bitwise_and(seam_mask, mask_warped)
if blender is None and not timelapse:
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
if blend_width < 1:
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
elif blend_type == "multiband":
blender = cv.detail_MultiBandBlender()
                blender.setNumBands(int(np.log(blend_width) / np.log(2.) - 1.))  # np.int was removed from NumPy
elif blend_type == "feather":
blender = cv.detail_FeatherBlender()
blender.setSharpness(1. / blend_width)
blender.prepare(dst_sz)
elif timelapser is None and timelapse:
timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
timelapser.initialize(corners, sizes)
if timelapse:
ma_tones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
timelapser.process(image_warped_s, ma_tones, corners[idx])
pos_s = img_names[idx].rfind("/")
if pos_s == -1:
fixed_file_name = "fixed_" + img_names[idx]
else:
fixed_file_name = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
cv.imwrite(fixed_file_name, timelapser.getDst())
else:
blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
if not timelapse:
result = None
result_mask = None
result, result_mask = blender.blend(result, result_mask)
cv.imwrite(result_name, result)
zoom_x = 600.0 / result.shape[1]
dst = cv.normalize(src=result, dst=None, alpha=255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
dst = cv.resize(dst, dsize=None, fx=zoom_x, fy=zoom_x)
cv.imshow(result_name, dst)
cv.waitKey()
print("Done")
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
''' An example of Laplacian Pyramid construction and merging.
Level : Intermediate
Usage : python lappyr.py [<video source>]
References:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.54.299
Alexander Mordvintsev 6/10/12
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
import video
from common import nothing, getsize
def build_lappyr(img, leveln=6, dtype=np.int16):
img = dtype(img)
levels = []
for _i in xrange(leveln-1):
next_img = cv.pyrDown(img)
img1 = cv.pyrUp(next_img, dstsize=getsize(img))
levels.append(img-img1)
img = next_img
levels.append(img)
return levels
def merge_lappyr(levels):
img = levels[-1]
for lev_img in levels[-2::-1]:
img = cv.pyrUp(img, dstsize=getsize(lev_img))
img += lev_img
return np.uint8(np.clip(img, 0, 255))
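
# --- Illustrative round trip (not used by main below) ---
# Each pyramid level stores the detail lost by one pyrDown/pyrUp cycle, so
# merging a freshly built pyramid reconstructs the image up to rounding.
def _roundtrip_sketch(img):
    return merge_lappyr(build_lappyr(img))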
def main():
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = 0
cap = video.create_capture(fn)
leveln = 6
cv.namedWindow('level control')
for i in xrange(leveln):
cv.createTrackbar('%d'%i, 'level control', 5, 50, nothing)
while True:
_ret, frame = cap.read()
pyr = build_lappyr(frame, leveln)
for i in xrange(leveln):
v = int(cv.getTrackbarPos('%d'%i, 'level control') / 5)
pyr[i] *= v
res = merge_lappyr(pyr)
cv.imshow('laplacian pyramid filter', res)
if cv.waitKey(1) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Stitching sample
================
Show how to use Stitcher API from python in a simple way to stitch panoramas
or scans.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import argparse
import sys
modes = (cv.Stitcher_PANORAMA, cv.Stitcher_SCANS)
parser = argparse.ArgumentParser(prog='stitching.py', description='Stitching sample.')
parser.add_argument('--mode',
type = int, choices = modes, default = cv.Stitcher_PANORAMA,
help = 'Determines configuration of stitcher. The default is `PANORAMA` (%d), '
'mode suitable for creating photo panoramas. Option `SCANS` (%d) is suitable '
'for stitching materials under affine transformation, such as scans.' % modes)
parser.add_argument('--output', default = 'result.jpg',
help = 'Resulting image. The default is `result.jpg`.')
parser.add_argument('img', nargs='+', help = 'input images')
__doc__ += '\n' + parser.format_help()
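
# --- Illustrative sketch (main below adds argument and error handling) ---
# The high-level Stitcher API needs only a mode and a list of images:
def _stitch_sketch(imgs, mode=cv.Stitcher_PANORAMA):
    stitcher = cv.Stitcher.create(mode)
    status, pano = stitcher.stitch(imgs)
    return pano if status == cv.Stitcher_OK else None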
def main():
args = parser.parse_args()
# read input images
imgs = []
for img_name in args.img:
img = cv.imread(cv.samples.findFile(img_name))
if img is None:
print("can't read image " + img_name)
sys.exit(-1)
imgs.append(img)
stitcher = cv.Stitcher.create(args.mode)
status, pano = stitcher.stitch(imgs)
if status != cv.Stitcher_OK:
print("Can't stitch images, error code = %d" % status)
sys.exit(-1)
cv.imwrite(args.output, pano)
print("stitching completed successfully. %s saved!" % args.output)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Simple "Square Detector" program.
Loads several images sequentially and tries to find squares in each image.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
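
# angle_cos above returns |cos| of the angle at vertex p1 formed with p0 and
# p2; find_squares below keeps quads whose largest such value is < 0.1, i.e.
# all corners within roughly 84-96 degrees of a right angle.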
def find_squares(img):
img = cv.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv.Canny(gray, 0, 50, apertureSize=5)
bin = cv.dilate(bin, None)
else:
_retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY)
contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv.arcLength(cnt, True)
cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
def main():
from glob import glob
for fn in glob('../data/pic*.png'):
img = cv.imread(fn)
squares = find_squares(img)
cv.drawContours( img, squares, -1, (0, 255, 0), 3 )
cv.imshow('squares', img)
ch = cv.waitKey()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Lucas-Kanade homography tracker
===============================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames. Finds homography between reference and current views.
Usage
-----
lk_homography.py [<video_source>]
Keys
----
ESC - exit
SPACE - start tracking
r - toggle RANSAC
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
from common import draw_str
from video import presets
lk_params = dict( winSize = (19, 19),
maxLevel = 2,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 1000,
qualityLevel = 0.01,
minDistance = 8,
blockSize = 19 )
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
status = d < back_threshold
return p1, status
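
# checkedTrace above is a forward-backward consistency check: points are
# tracked img0 -> img1 and then back again, and only points returning within
# back_threshold pixels of where they started are kept.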
green = (0, 255, 0)
red = (0, 0, 255)
class App:
def __init__(self, video_src):
        self.cam = video.create_capture(video_src, presets['book'])
self.p0 = None
self.use_ransac = True
def run(self):
while True:
_ret, frame = self.cam.read()
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
vis = frame.copy()
if self.p0 is not None:
p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)
self.p1 = p2[trace_status].copy()
self.p0 = self.p0[trace_status].copy()
self.gray1 = frame_gray
if len(self.p0) < 4:
self.p0 = None
continue
H, status = cv.findHomography(self.p0, self.p1, (0, cv.RANSAC)[self.use_ransac], 10.0)
h, w = frame.shape[:2]
overlay = cv.warpPerspective(self.frame0, H, (w, h))
vis = cv.addWeighted(vis, 0.5, overlay, 0.5, 0.0)
                for (x0, y0), (x1, y1), good in zip(np.int32(self.p0[:, 0]), np.int32(self.p1[:, 0]), status[:, 0]):
if good:
cv.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
cv.circle(vis, (x1, y1), 2, (red, green)[good], -1)
draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
if self.use_ransac:
draw_str(vis, (20, 40), 'RANSAC')
else:
p = cv.goodFeaturesToTrack(frame_gray, **feature_params)
if p is not None:
                    for x, y in np.int32(p[:, 0]):
cv.circle(vis, (x, y), 2, green, -1)
draw_str(vis, (20, 20), 'feature count: %d' % len(p))
cv.imshow('lk_homography', vis)
ch = cv.waitKey(1)
if ch == 27:
break
if ch == ord(' '):
self.frame0 = frame.copy()
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
if self.p0 is not None:
self.p1 = self.p0
self.gray0 = frame_gray
self.gray1 = frame_gray
if ch == ord('r'):
self.use_ransac = not self.use_ransac
def main():
import sys
try:
video_src = sys.argv[1]
    except IndexError:
video_src = 0
App(video_src).run()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
SVM and KNearest digit recognition.
Sample loads a dataset of handwritten digits from 'digits.png'.
Then it trains SVM and KNearest classifiers on it and evaluates
their accuracy.
The following preprocessing is applied to the dataset:
- Moment-based image deskew (see deskew())
- Digit images are split into 4 10x10 cells and a 16-bin
histogram of oriented gradients is computed for each
cell
- Transform histograms to space with Hellinger metric (see [1] (RootSIFT))
[1] R. Arandjelovic, A. Zisserman
"Three things everyone should know to improve object retrieval"
http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
Usage:
digits.py
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
from multiprocessing.pool import ThreadPool
from numpy.linalg import norm
# local modules
from common import clock, mosaic
SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = 'digits.png'
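
# --- Illustrative sketch of the Hellinger mapping from the docstring ---
# (the same steps appear inside preprocess_hog below): L1-normalize the
# histogram, take the square root, then L2-normalize.
def _hellinger_sketch(hist, eps=1e-7):
    hist = hist / (hist.sum() + eps)
    hist = np.sqrt(hist)
    return hist / (norm(hist) + eps)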
def split2d(img, cell_size, flatten=True):
h, w = img.shape[:2]
sx, sy = cell_size
cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
cells = np.array(cells)
if flatten:
cells = cells.reshape(-1, sy, sx)
return cells
def load_digits(fn):
fn = cv.samples.findFile(fn)
print('loading "%s" ...' % fn)
digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
digits = split2d(digits_img, (SZ, SZ))
    labels = np.repeat(np.arange(CLASS_N), len(digits)//CLASS_N)
return digits, labels
def deskew(img):
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
return img
class KNearest(object):
def __init__(self, k = 3):
self.k = k
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
return results.ravel()
def load(self, fn):
self.model = cv.ml.KNearest_load(fn)
def save(self, fn):
self.model.save(fn)
class SVM(object):
def __init__(self, C = 1, gamma = 0.5):
self.model = cv.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setType(cv.ml.SVM_C_SVC)
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
return self.model.predict(samples)[1].ravel()
def load(self, fn):
self.model = cv.ml.SVM_load(fn)
def save(self, fn):
self.model.save(fn)
def evaluate_model(model, digits, samples, labels):
resp = model.predict(samples)
err = (labels != resp).mean()
print('error: %.2f %%' % (err*100))
confusion = np.zeros((10, 10), np.int32)
for i, j in zip(labels, resp):
confusion[i, int(j)] += 1
print('confusion matrix:')
print(confusion)
print()
vis = []
for img, flag in zip(digits, resp == labels):
img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
if not flag:
img[...,:2] = 0
vis.append(img)
return mosaic(25, vis)
def preprocess_simple(digits):
return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
mag, ang = cv.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
# transform to Hellinger kernel
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= norm(hist) + eps
samples.append(hist)
return np.float32(samples)
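# Illustrative helper (an addition, not called by the sample itself): the
# RootSIFT-style mapping used in preprocess_hog, factored out for clarity.
def hellinger_normalize(hist, eps=1e-7):
    # L1-normalize, take the square root, then L2-normalize, so that the
    # Euclidean distance between mapped vectors corresponds to the Hellinger
    # kernel on the original histograms.
    hist = hist / (hist.sum() + eps)
    hist = np.sqrt(hist)
    return hist / (norm(hist) + eps)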
if __name__ == '__main__':
print(__doc__)
digits, labels = load_digits(DIGITS_FN)
print('preprocessing...')
# shuffle digits
rand = np.random.RandomState(321)
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
train_n = int(0.9*len(samples))
cv.imshow('test set', mosaic(25, digits[train_n:]))
digits_train, digits_test = np.split(digits2, [train_n])
samples_train, samples_test = np.split(samples, [train_n])
labels_train, labels_test = np.split(labels, [train_n])
print('training KNearest...')
model = KNearest(k=4)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv.imshow('KNearest test', vis)
print('training SVM...')
model = SVM(C=2.67, gamma=5.383)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv.imshow('SVM test', vis)
print('saving SVM as "digits_svm.dat"...')
model.save('digits_svm.dat')
cv.waitKey(0)
|
#!/usr/bin/env python
'''
===============================================================================
Interactive Image Segmentation using GrabCut algorithm.
This sample shows interactive image segmentation using the GrabCut algorithm.
USAGE:
python grabcut.py <filename>
README FIRST:
Two windows will show up, one for input and one for output.
At first, in input window, draw a rectangle around the object using the
right mouse button. Then press 'n' to segment the object (once or a few times)
For any finer touch-ups, you can press any of the keys below and draw lines on
the areas you want. Then again press 'n' to update the output.
Key '0' - To select areas of sure background
Key '1' - To select areas of sure foreground
Key '2' - To select areas of probable background
Key '3' - To select areas of probable foreground
Key 'n' - To update the segmentation
Key 'r' - To reset the setup
Key 's' - To save the results
===============================================================================
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
class App():
BLUE = [255,0,0] # rectangle color
RED = [0,0,255] # PR BG
GREEN = [0,255,0] # PR FG
BLACK = [0,0,0] # sure BG
WHITE = [255,255,255] # sure FG
DRAW_BG = {'color' : BLACK, 'val' : 0}
DRAW_FG = {'color' : WHITE, 'val' : 1}
DRAW_PR_BG = {'color' : RED, 'val' : 2}
DRAW_PR_FG = {'color' : GREEN, 'val' : 3}
# setting up flags
rect = (0,0,1,1)
drawing = False # flag for drawing curves
rectangle = False # flag for drawing rect
rect_over = False # flag to check if rect drawn
rect_or_mask = 100 # flag for selecting rect or mask mode
value = DRAW_FG # drawing initialized to FG
thickness = 3 # brush thickness
def onmouse(self, event, x, y, flags, param):
# Draw Rectangle
if event == cv.EVENT_RBUTTONDOWN:
self.rectangle = True
self.ix, self.iy = x,y
elif event == cv.EVENT_MOUSEMOVE:
if self.rectangle == True:
self.img = self.img2.copy()
cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2)
self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y))
self.rect_or_mask = 0
elif event == cv.EVENT_RBUTTONUP:
self.rectangle = False
self.rect_over = True
cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2)
self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y))
self.rect_or_mask = 0
print(" Now press the key 'n' a few times until no further change \n")
# draw touchup curves
if event == cv.EVENT_LBUTTONDOWN:
if self.rect_over == False:
print("first draw rectangle \n")
else:
self.drawing = True
cv.circle(self.img, (x,y), self.thickness, self.value['color'], -1)
cv.circle(self.mask, (x,y), self.thickness, self.value['val'], -1)
elif event == cv.EVENT_MOUSEMOVE:
if self.drawing == True:
cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1)
cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1)
elif event == cv.EVENT_LBUTTONUP:
if self.drawing == True:
self.drawing = False
cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1)
cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1)
def run(self):
# Loading images
if len(sys.argv) == 2:
filename = sys.argv[1] # for drawing purposes
else:
print("No input image given, so loading default image, lena.jpg \n")
print("Correct Usage: python grabcut.py <filename> \n")
filename = 'lena.jpg'
self.img = cv.imread(cv.samples.findFile(filename))
self.img2 = self.img.copy() # a copy of original image
self.mask = np.zeros(self.img.shape[:2], dtype = np.uint8) # mask initialized to PR_BG
self.output = np.zeros(self.img.shape, np.uint8) # output image to be shown
# input and output windows
cv.namedWindow('output')
cv.namedWindow('input')
cv.setMouseCallback('input', self.onmouse)
cv.moveWindow('input', self.img.shape[1]+10,90)
print(" Instructions: \n")
print(" Draw a rectangle around the object using right mouse button \n")
while(1):
cv.imshow('output', self.output)
cv.imshow('input', self.img)
k = cv.waitKey(1)
# key bindings
if k == 27: # esc to exit
break
elif k == ord('0'): # BG drawing
print(" mark background regions with left mouse button \n")
self.value = self.DRAW_BG
elif k == ord('1'): # FG drawing
print(" mark foreground regions with left mouse button \n")
self.value = self.DRAW_FG
elif k == ord('2'): # PR_BG drawing
self.value = self.DRAW_PR_BG
elif k == ord('3'): # PR_FG drawing
self.value = self.DRAW_PR_FG
elif k == ord('s'): # save image
bar = np.zeros((self.img.shape[0], 5, 3), np.uint8)
res = np.hstack((self.img2, bar, self.img, bar, self.output))
cv.imwrite('grabcut_output.png', res)
print(" Result saved as image \n")
elif k == ord('r'): # reset everything
print("resetting \n")
self.rect = (0,0,1,1)
self.drawing = False
self.rectangle = False
self.rect_or_mask = 100
self.rect_over = False
self.value = self.DRAW_FG
self.img = self.img2.copy()
self.mask = np.zeros(self.img.shape[:2], dtype = np.uint8) # mask initialized to PR_BG
self.output = np.zeros(self.img.shape, np.uint8) # output image to be shown
elif k == ord('n'): # segment the image
print(""" For finer touchups, mark foreground and background after pressing keys 0-3
and again press 'n' \n""")
try:
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
if (self.rect_or_mask == 0): # grabcut with rect
cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_RECT)
self.rect_or_mask = 1
elif (self.rect_or_mask == 1): # grabcut with mask
cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_MASK)
except:
import traceback
traceback.print_exc()
mask2 = np.where((self.mask==1) + (self.mask==3), 255, 0).astype('uint8')
self.output = cv.bitwise_and(self.img2, self.img2, mask=mask2)
print('Done')
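# Minimal non-interactive grabCut sketch (illustrative; the rectangle below is
# an assumption, not part of this sample):
#   img = cv.imread(cv.samples.findFile('lena.jpg'))
#   mask = np.zeros(img.shape[:2], np.uint8)
#   bgd, fgd = np.zeros((1, 65), np.float64), np.zeros((1, 65), np.float64)
#   cv.grabCut(img, mask, (50, 50, 400, 400), bgd, fgd, 5, cv.GC_INIT_WITH_RECT)
#   fg = cv.bitwise_and(img, img,
#                       mask=np.where((mask == 1) | (mask == 3), 255, 0).astype('uint8'))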
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Affine invariant feature-based image matching sample.
This sample is similar to find_obj.py, but uses the affine transformation
space sampling technique, called ASIFT [1]. While the original implementation
is based on SIFT, you can try to use SURF or ORB detectors instead. Homography RANSAC
is used to reject outliers. Threading is used for faster affine sampling.
[1] http://www.ipol.im/pub/algo/my_affine_sift/
USAGE
asift.py [--feature=<sift|surf|orb|brisk>[-flann]] [ <image1> <image2> ]
--feature - Feature to use. Can be sift, surf, orb or brisk. Append '-flann'
to the feature name to use a FLANN-based matcher instead of brute-force.
Press left mouse button on a feature point to see its matching point.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import itertools as it
from multiprocessing.pool import ThreadPool
# local modules
from common import Timer
from find_obj import init_feature, filter_matches, explore_match
def affine_skew(tilt, phi, img, mask=None):
'''
affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
Ai - is an affine transform matrix from skew_img to img
'''
h, w = img.shape[:2]
if mask is None:
mask = np.zeros((h, w), np.uint8)
mask[:] = 255
A = np.float32([[1, 0, 0], [0, 1, 0]])
if phi != 0.0:
phi = np.deg2rad(phi)
s, c = np.sin(phi), np.cos(phi)
A = np.float32([[c,-s], [ s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]]
tcorners = np.int32( np.dot(corners, A.T) )
x, y, w, h = cv.boundingRect(tcorners.reshape(1,-1,2))
A = np.hstack([A, [[-x], [-y]]])
img = cv.warpAffine(img, A, (w, h), flags=cv.INTER_LINEAR, borderMode=cv.BORDER_REPLICATE)
if tilt != 1.0:
s = 0.8*np.sqrt(tilt*tilt-1)
img = cv.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv.INTER_NEAREST)
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2]
mask = cv.warpAffine(mask, A, (w, h), flags=cv.INTER_NEAREST)
Ai = cv.invertAffineTransform(A)
return img, mask, Ai
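# Note (illustrative): affine_detect() below samples the camera-pose space on
# the grid recommended in the ASIFT paper -- tilts t = 2^(k/2) for k = 1..5
# with longitude steps of 72/t degrees -- while affine_skew() anti-aliases
# each tilt with a directional Gaussian blur of sigma = 0.8*sqrt(t^2 - 1)
# before the 1/t horizontal subsampling.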
def affine_detect(detector, img, mask=None, pool=None):
'''
affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs
Apply a set of affine transformations to the image, detect keypoints and
reproject them into initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
ThreadPool object may be passed to speedup the computation.
'''
params = [(1.0, 0.0)]
for t in 2**(0.5*np.arange(1,6)):
for phi in np.arange(0, 180, 72.0 / t):
params.append((t, phi))
def f(p):
t, phi = p
timg, tmask, Ai = affine_skew(t, phi, img)
keypoints, descrs = detector.detectAndCompute(timg, tmask)
for kp in keypoints:
x, y = kp.pt
kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
if descrs is None:
descrs = []
return keypoints, descrs
keypoints, descrs = [], []
if pool is None:
ires = it.imap(f, params)
else:
ires = pool.imap(f, params)
for i, (k, d) in enumerate(ires):
print('affine sampling: %d / %d\r' % (i+1, len(params)), end='')
keypoints.extend(k)
descrs.extend(d)
print()
return keypoints, np.array(descrs)
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
opts = dict(opts)
feature_name = opts.get('--feature', 'brisk-flann')
try:
fn1, fn2 = args
except ValueError:
fn1 = 'aero1.jpg'
fn2 = 'aero3.jpg'
img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
detector, matcher = init_feature(feature_name)
if img1 is None:
print('Failed to load fn1:', fn1)
sys.exit(1)
if img2 is None:
print('Failed to load fn2:', fn2)
sys.exit(1)
if detector is None:
print('unknown feature:', feature_name)
sys.exit(1)
print('using', feature_name)
pool=ThreadPool(processes = cv.getNumberOfCPUs())
kp1, desc1 = affine_detect(detector, img1, pool=pool)
kp2, desc2 = affine_detect(detector, img2, pool=pool)
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
def match_and_draw(win):
with Timer('matching'):
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
explore_match(win, img1, img2, kp_pairs, None, H)
match_and_draw('affine find_obj')
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
camera calibration for distorted images with chess board samples
reads distorted images, calculates the calibration and writes undistorted images
usage:
calibrate.py [--debug <output path>] [--square_size] [<image mask>]
default values:
--debug: ./output/
--square_size: 1.0
<image mask> defaults to ../data/left*.jpg
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# local modules
from common import splitfn
# built-in modules
import os
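# Note (illustrative): cv.calibrateCamera returns the RMS reprojection error
# together with the 3x3 camera matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
# and the distortion coefficients (k1, k2, p1, p2[, k3]); the object points
# built below lie on a planar z=0 grid scaled by --square_size.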
def main():
import sys
import getopt
from glob import glob
args, img_mask = getopt.getopt(sys.argv[1:], '', ['debug=', 'square_size=', 'threads='])
args = dict(args)
args.setdefault('--debug', './output/')
args.setdefault('--square_size', 1.0)
args.setdefault('--threads', 4)
if not img_mask:
img_mask = '../data/left??.jpg' # default
else:
img_mask = img_mask[0]
img_names = glob(img_mask)
debug_dir = args.get('--debug')
if debug_dir and not os.path.isdir(debug_dir):
os.mkdir(debug_dir)
square_size = float(args.get('--square_size'))
pattern_size = (9, 6)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
h, w = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2] # TODO: use imquery call to retrieve results
def processImage(fn):
print('processing %s... ' % fn)
img = cv.imread(fn, 0)
if img is None:
print("Failed to load", fn)
return None
assert w == img.shape[1] and h == img.shape[0], ("size: %d x %d ... " % (img.shape[1], img.shape[0]))
found, corners = cv.findChessboardCorners(img, pattern_size)
if found:
term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
_path, name, _ext = splitfn(fn)
outfile = os.path.join(debug_dir, name + '_chess.png')
cv.imwrite(outfile, vis)
if not found:
print('chessboard not found')
return None
print(' %s... OK' % fn)
return (corners.reshape(-1, 2), pattern_points)
threads_num = int(args.get('--threads'))
if threads_num <= 1:
chessboards = [processImage(fn) for fn in img_names]
else:
print("Run with %d threads..." % threads_num)
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(threads_num)
chessboards = pool.map(processImage, img_names)
chessboards = [x for x in chessboards if x is not None]
for (corners, pattern_points) in chessboards:
img_points.append(corners)
obj_points.append(pattern_points)
# calculate camera distortion
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
# undistort the image with the calibration
print('')
for fn in img_names if debug_dir else []:
_path, name, _ext = splitfn(fn)
img_found = os.path.join(debug_dir, name + '_chess.png')
outfile = os.path.join(debug_dir, name + '_undistorted.png')
img = cv.imread(img_found)
if img is None:
continue
h, w = img.shape[:2]
newcameramtx, roi = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))
dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)
# crop and save the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
print('Undistorted image written to: %s' % outfile)
cv.imwrite(outfile, dst)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Multiscale Turing Patterns generator
====================================
Inspired by http://www.jonathanmccabe.com/Cyclic_Symmetric_Multi-Scale_Turing_Patterns.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from common import draw_str
import getopt
from itertools import count
help_message = '''
USAGE: turing.py [-o <output.avi>]
Press ESC to stop.
'''
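# Note (illustrative): each frame builds a Gaussian pyramid of the pattern,
# takes the activator-inhibitor difference d between neighbouring levels at
# every scale, and nudges each pixel by sign(d)*0.025 at the scale whose local
# variance (a Gaussian blur of d*d) is smallest, then renormalizes to [0, 1].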
def main():
print(help_message)
w, h = 512, 512
args, _args_list = getopt.getopt(sys.argv[1:], 'o:', [])
args = dict(args)
out = None
if '-o' in args:
fn = args['-o']
out = cv.VideoWriter(args['-o'], cv.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False)
print('writing %s ...' % fn)
a = np.zeros((h, w), np.float32)
cv.randu(a, np.array([0]), np.array([1]))
def process_scale(a_lods, lod):
d = a_lods[lod] - cv.pyrUp(a_lods[lod+1])
for _i in xrange(lod):
d = cv.pyrUp(d)
v = cv.GaussianBlur(d*d, (3, 3), 0)
return np.sign(d), v
scale_num = 6
for frame_i in count():
a_lods = [a]
for i in xrange(scale_num):
a_lods.append(cv.pyrDown(a_lods[-1]))
ms, vs = [], []
for i in xrange(1, scale_num):
m, v = process_scale(a_lods, i)
ms.append(m)
vs.append(v)
mi = np.argmin(vs, 0)
a += np.choose(mi, ms) * 0.025
a = (a-a.min()) / np.ptp(a)  # ndarray.ptp() was removed in NumPy 2.0
if out:
    out.write(np.uint8(a*255))  # VideoWriter frames must be 8-bit
vis = a.copy()
draw_str(vis, (20, 20), 'frame %d' % frame_i)
cv.imshow('a', vis)
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from numpy import pi, sin, cos
defaultSize = 512
class TestSceneRender():
def __init__(self, bgImg = None, fgImg = None,
deformation = False, speed = 0.25, **params):
self.time = 0.0
self.timeStep = 1.0 / 30.0
self.foreground = fgImg
self.deformation = deformation
self.speed = speed
if bgImg is not None:
self.sceneBg = bgImg.copy()
else:
self.sceneBg = np.zeros((defaultSize, defaultSize), np.uint8)
self.w = self.sceneBg.shape[0]
self.h = self.sceneBg.shape[1]
if fgImg is not None:
self.foreground = fgImg.copy()
self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2), int(self.h/2 - fgImg.shape[1]/2))
self.xAmpl = self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0])
self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1])
self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10),
(self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]).astype(int)
self.currentRect = self.initialRect
def getXOffset(self, time):
return int( self.xAmpl*cos(time*self.speed))
def getYOffset(self, time):
return int(self.yAmpl*sin(time*self.speed))
def setInitialRect(self, rect):
self.initialRect = rect
def getRectInTime(self, time):
if self.foreground is not None:
tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time)))
x0, y0 = tmp
x1, y1 = tmp + self.foreground.shape[0:2]
return np.array([y0, x0, y1, x1])
else:
x0, y0 = self.initialRect[0] + np.array((self.getXOffset(time), self.getYOffset(time)))
x1, y1 = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time)))
return np.array([y0, x0, y1, x1])
def getCurrentRect(self):
if self.foreground is not None:
x0 = self.currentCenter[0]
y0 = self.currentCenter[1]
x1 = self.currentCenter[0] + self.foreground.shape[0]
y1 = self.currentCenter[1] + self.foreground.shape[1]
return np.array([y0, x0, y1, x1])
else:
x0, y0 = self.currentRect[0]
x1, y1 = self.currentRect[2]
return np.array([x0, y0, x1, y1])
def getNextFrame(self):
img = self.sceneBg.copy()
if self.foreground is not None:
self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time))
img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0],
self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground
else:
self.currentRect = self.initialRect + int(30*cos(self.time*self.speed) + 50*sin(self.time*self.speed))
if self.deformation:
self.currentRect[1:3] += int(self.h/20*cos(self.time))
cv.fillConvexPoly(img, self.currentRect, (0, 0, 255))
self.time += self.timeStep
return img
def resetTime(self):
self.time = 0.0
def main():
backGr = cv.imread(cv.samples.findFile('graf1.png'))
fgr = cv.imread(cv.samples.findFile('box.png'))
render = TestSceneRender(backGr, fgr)
while True:
img = render.getNextFrame()
cv.imshow('img', img)
ch = cv.waitKey(3)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Floodfill sample.
Usage:
floodfill.py [<image>]
Click on the image to set seed point
Keys:
f - toggle floating range
c - toggle 4/8 connectivity
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
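# Note (illustrative): cv.floodFill requires the mask to be 2 pixels larger
# than the image in each dimension (hence the (h+2, w+2) allocation below),
# and the flags word combines the connectivity (4 or 8) with option bits such
# as cv.FLOODFILL_FIXED_RANGE, which compares pixels against the seed value
# instead of against their neighbours.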
class App():
def update(self, dummy=None):
if self.seed_pt is None:
cv.imshow('floodfill', self.img)
return
flooded = self.img.copy()
self.mask[:] = 0
lo = cv.getTrackbarPos('lo', 'floodfill')
hi = cv.getTrackbarPos('hi', 'floodfill')
flags = self.connectivity
if self.fixed_range:
flags |= cv.FLOODFILL_FIXED_RANGE
cv.floodFill(flooded, self.mask, self.seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
cv.circle(flooded, self.seed_pt, 2, (0, 0, 255), -1)
cv.imshow('floodfill', flooded)
def onmouse(self, event, x, y, flags, param):
if flags & cv.EVENT_FLAG_LBUTTON:
self.seed_pt = x, y
self.update()
def run(self):
try:
fn = sys.argv[1]
except IndexError:
fn = 'fruits.jpg'
self.img = cv.imread(cv.samples.findFile(fn))
if self.img is None:
print('Failed to load image file:', fn)
sys.exit(1)
h, w = self.img.shape[:2]
self.mask = np.zeros((h+2, w+2), np.uint8)
self.seed_pt = None
self.fixed_range = True
self.connectivity = 4
self.update()
cv.setMouseCallback('floodfill', self.onmouse)
cv.createTrackbar('lo', 'floodfill', 20, 255, self.update)
cv.createTrackbar('hi', 'floodfill', 20, 255, self.update)
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord('f'):
self.fixed_range = not self.fixed_range
print('using %s range' % ('floating', 'fixed')[self.fixed_range])
self.update()
if ch == ord('c'):
self.connectivity = 12-self.connectivity
print('connectivity =', self.connectivity)
self.update()
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Sample-launcher application.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
# local modules
from common import splitfn
# built-in modules
import webbrowser
from glob import glob
from subprocess import Popen
try:
import tkinter as tk # Python 3
from tkinter.scrolledtext import ScrolledText
except ImportError:
import Tkinter as tk # Python 2
from ScrolledText import ScrolledText
#from IPython.Shell import IPShellEmbed
#ipshell = IPShellEmbed()
exclude_list = ['demo', 'common']
class LinkManager:
def __init__(self, text, url_callback = None):
self.text = text
self.text.tag_config("link", foreground="blue", underline=1)
self.text.tag_bind("link", "<Enter>", self._enter)
self.text.tag_bind("link", "<Leave>", self._leave)
self.text.tag_bind("link", "<Button-1>", self._click)
self.url_callback = url_callback
self.reset()
def reset(self):
self.links = {}
def add(self, action):
# add an action to the manager. returns tags to use in
# associated text widget
tag = "link-%d" % len(self.links)
self.links[tag] = action
return "link", tag
def _enter(self, event):
self.text.config(cursor="hand2")
def _leave(self, event):
self.text.config(cursor="")
def _click(self, event):
for tag in self.text.tag_names(tk.CURRENT):
if tag.startswith("link-"):
proc = self.links[tag]
if callable(proc):
proc()
else:
if self.url_callback:
self.url_callback(proc)
class App:
def __init__(self):
root = tk.Tk()
root.title('OpenCV Demo')
self.win = win = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED, sashwidth=4)
self.win.pack(fill=tk.BOTH, expand=1)
left = tk.Frame(win)
right = tk.Frame(win)
win.add(left)
win.add(right)
scrollbar = tk.Scrollbar(left, orient=tk.VERTICAL)
self.demos_lb = demos_lb = tk.Listbox(left, yscrollcommand=scrollbar.set)
scrollbar.config(command=demos_lb.yview)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
demos_lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.samples = {}
for fn in glob('*.py'):
name = splitfn(fn)[1]
if fn[0] != '_' and name not in exclude_list:
self.samples[name] = fn
for name in sorted(self.samples):
demos_lb.insert(tk.END, name)
demos_lb.bind('<<ListboxSelect>>', self.on_demo_select)
self.cmd_entry = cmd_entry = tk.Entry(right)
cmd_entry.bind('<Return>', self.on_run)
run_btn = tk.Button(right, command=self.on_run, text='Run', width=8)
self.text = text = ScrolledText(right, font=('arial', 12, 'normal'), width = 30, wrap='word')
self.linker = _linker = LinkManager(text, self.on_link)
self.text.tag_config("header1", font=('arial', 14, 'bold'))
self.text.tag_config("header2", font=('arial', 12, 'bold'))
text.config(state='disabled')
text.pack(fill='both', expand=1, side=tk.BOTTOM)
cmd_entry.pack(fill='x', side='left' , expand=1)
run_btn.pack()
def on_link(self, url):
print(url)
webbrowser.open(url)
def on_demo_select(self, evt):
name = self.demos_lb.get( self.demos_lb.curselection()[0] )
fn = self.samples[name]
loc = {}
try:
execfile(fn, loc) # Python 2
except NameError:
exec(open(fn).read(), loc) # Python 3
descr = loc.get('__doc__', 'no-description')
self.linker.reset()
self.text.config(state='normal')
self.text.delete(1.0, tk.END)
self.format_text(descr)
self.text.config(state='disabled')
self.cmd_entry.delete(0, tk.END)
self.cmd_entry.insert(0, fn)
def format_text(self, s):
text = self.text
lines = s.splitlines()
for i, s in enumerate(lines):
s = s.rstrip()
if i == 0 and not s:
continue
if s and s == '='*len(s):
text.tag_add('header1', 'end-2l', 'end-1l')
elif s and s == '-'*len(s):
text.tag_add('header2', 'end-2l', 'end-1l')
else:
text.insert('end', s+'\n')
def add_link(start, end, url):
for tag in self.linker.add(url):
text.tag_add(tag, start, end)
self.match_text(r'http://\S+', add_link)
def match_text(self, pattern, tag_proc, regexp=True):
text = self.text
text.mark_set('matchPos', '1.0')
count = tk.IntVar()
while True:
match_index = text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end')
if not match_index:
break
end_index = text.index( "%s+%sc" % (match_index, count.get()) )
text.mark_set('matchPos', end_index)
if callable(tag_proc):
tag_proc(match_index, end_index, text.get(match_index, end_index))
else:
text.tag_add(tag_proc, match_index, end_index)
def on_run(self, *args):
cmd = self.cmd_entry.get()
print('running:', cmd)
Popen(sys.executable + ' ' + cmd, shell=True)
def run(self):
tk.mainloop()
if __name__ == '__main__':
App().run()
|
#!/usr/bin/env python
'''
prints OpenCV version
Usage:
opencv_version.py [<params>]
params:
--build: print complete build info
--help: print this help
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def main():
import sys
try:
param = sys.argv[1]
except IndexError:
param = ""
if "--build" == param:
print(cv.getBuildInformation())
elif "--help" == param:
print("\t--build\n\t\tprint complete build info")
print("\t--help\n\t\tprint this help")
else:
print("Welcome to OpenCV")
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Video capture sample.
Sample shows how the VideoCapture class can be used to acquire video
frames from a camera or a movie file. The sample also provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.
Usage:
video.py [--shotdir <shot path>] [source0] [source1] ...
sourceN is an
- integer number for camera capture
- name of video file
- synth:<params> for procedural video
Synth examples:
synth:bg=lena.jpg:noise=0.1
synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480
Keys:
ESC - exit
SPACE - save current frame to <shot path> directory
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import re
from numpy import pi, sin, cos
# local modules
from tst_scene_render import TestSceneRender
import common
class VideoSynthBase(object):
def __init__(self, size=None, noise=0.0, bg = None, **params):
self.bg = None
self.frame_size = (640, 480)
if bg is not None:
self.bg = cv.imread(cv.samples.findFile(bg))
h, w = self.bg.shape[:2]
self.frame_size = (w, h)
if size is not None:
w, h = map(int, size.split('x'))
self.frame_size = (w, h)
self.bg = cv.resize(self.bg, self.frame_size)
self.noise = float(noise)
def render(self, dst):
pass
def read(self, dst=None):
w, h = self.frame_size
if self.bg is None:
buf = np.zeros((h, w, 3), np.uint8)
else:
buf = self.bg.copy()
self.render(buf)
if self.noise > 0.0:
noise = np.zeros((h, w, 3), np.int8)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
return True, buf
def isOpened(self):
return True
class Book(VideoSynthBase):
def __init__(self, **kw):
super(Book, self).__init__(**kw)
backGr = cv.imread(cv.samples.findFile('graf1.png'))
fgr = cv.imread(cv.samples.findFile('box.png'))
self.render = TestSceneRender(backGr, fgr, speed = 1)
def read(self, dst=None):
noise = np.zeros(self.render.sceneBg.shape, np.int8)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
class Cube(VideoSynthBase):
def __init__(self, **kw):
super(Cube, self).__init__(**kw)
self.render = TestSceneRender(cv.imread(cv.samples.findFile('pca_test1.jpg')), deformation = True, speed = 1)
def read(self, dst=None):
noise = np.zeros(self.render.sceneBg.shape, np.int8)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
class Chess(VideoSynthBase):
def __init__(self, **kw):
super(Chess, self).__init__(**kw)
w, h = self.frame_size
self.grid_size = sx, sy = 10, 7
white_quads = []
black_quads = []
for i, j in np.ndindex(sy, sx):
q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
[white_quads, black_quads][(i + j) % 2].append(q)
self.white_quads = np.float32(white_quads)
self.black_quads = np.float32(black_quads)
fx = 0.9
self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
[0, fx*w, 0.5*(h-1)],
[0.0,0.0, 1.0]])
self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
self.t = 0
def draw_quads(self, img, quads, color = (0, 255, 0)):
img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
img_quads.shape = quads.shape[:2] + (2,)
for q in img_quads:
cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2)
def render(self, dst):
t = self.t
self.t += 1.0/30.0
sx, sy = self.grid_size
center = np.array([0.5*sx, 0.5*sy, 0.0])
phi = pi/3 + sin(t*3)*pi/8
c, s = cos(phi), sin(phi)
ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
target_pos = center + ofs
R, self.tvec = common.lookat(eye_pos, target_pos)
self.rvec = common.mtx2rvec(R)
self.draw_quads(dst, self.white_quads, (245, 245, 245))
self.draw_quads(dst, self.black_quads, (10, 10, 10))
classes = dict(chess=Chess, book=Book, cube=Cube)
presets = dict(
empty = 'synth:',
lena = 'synth:bg=lena.jpg:noise=0.1',
chess = 'synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480',
book = 'synth:class=book:bg=graf1.png:noise=0.1:size=640x480',
cube = 'synth:class=cube:bg=pca_test1.jpg:noise=0.0:size=640x480'
)
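# Example (illustrative): create_capture() accepts camera indices, file names
# or 'synth' specs, e.g.
#   cap = create_capture('synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480')
#   ok, frame = cap.read()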
def create_capture(source = 0, fallback = presets['chess']):
'''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
'''
source = str(source).strip()
# Win32: handle drive letter ('c:', ...)
source = re.sub(r'(^|=)([a-zA-Z]):([/\\a-zA-Z0-9])', r'\1?disk\2?\3', source)
chunks = source.split(':')
chunks = [re.sub(r'\?disk([a-zA-Z])\?', r'\1:', s) for s in chunks]
source = chunks[0]
try: source = int(source)
except ValueError: pass
params = dict( s.split('=') for s in chunks[1:] )
cap = None
if source == 'synth':
Class = classes.get(params.get('class', None), VideoSynthBase)
try: cap = Class(**params)
except: pass
else:
cap = cv.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print('Warning: unable to open video source: ', source)
if fallback is not None:
return create_capture(fallback, None)
return cap
if __name__ == '__main__':
import sys
import getopt
print(__doc__)
args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
args = dict(args)
shotdir = args.get('--shotdir', '.')
if len(sources) == 0:
sources = [ 0 ]
caps = list(map(create_capture, sources))
shot_idx = 0
while True:
imgs = []
for i, cap in enumerate(caps):
ret, img = cap.read()
imgs.append(img)
cv.imshow('capture %d' % i, img)
ch = cv.waitKey(1)
if ch == 27:
break
if ch == ord(' '):
for i, img in enumerate(imgs):
fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
cv.imwrite(fn, img)
print(fn, 'saved')
shot_idx += 1
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
plots image as logPolar and linearPolar
Usage:
logpolar.py
Keys:
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def main():
import sys
try:
fn = sys.argv[1]
except IndexError:
fn = 'fruits.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
img2 = cv.logPolar(img, (img.shape[1]/2, img.shape[0]/2), 40, cv.WARP_FILL_OUTLIERS)  # center is (x, y) = (cols/2, rows/2)
img3 = cv.linearPolar(img, (img.shape[1]/2, img.shape[0]/2), 40, cv.WARP_FILL_OUTLIERS)
cv.imshow('before', img)
cv.imshow('logpolar', img2)
cv.imshow('linearpolar', img3)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/python
'''
This example illustrates how to use Hough Transform to find lines
Usage:
houghlines.py [<image_name>]
image argument defaults to pic1.png
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
import sys
import math
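# Note (illustrative): HoughLines returns lines as (rho, theta) pairs; the
# drawing code below converts each pair to two far-apart endpoints around
# (x0, y0) = (rho*cos(theta), rho*sin(theta)), offset +/-1000 pixels along
# the line direction (-sin(theta), cos(theta)). HoughLinesP returns segment
# endpoints directly.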
def main():
try:
fn = sys.argv[1]
except IndexError:
fn = 'pic1.png'
src = cv.imread(cv.samples.findFile(fn))
dst = cv.Canny(src, 50, 200)
cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
if True: # HoughLinesP
lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)
a, b, _c = lines.shape
for i in range(a):
cv.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv.LINE_AA)
else: # HoughLines
lines = cv.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
if lines is not None:
a, b, _c = lines.shape
for i in range(a):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0, y0 = a*rho, b*rho
pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA)
cv.imshow("detected lines", cdst)
cv.imshow("source", src)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
'''
Text skewness correction
This tutorial demonstrates how to correct the skewness in a text.
The program takes a skewed source image as input and shows the non-skewed text.
Usage:
python text_skewness_correction.py --image "Image path"
'''
import numpy as np
import cv2 as cv
import sys
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", required=True, help="path to input image file")
args = vars(parser.parse_args())
# load the image from disk
image = cv.imread(cv.samples.findFile(args["image"]))
if image is None:
print("can't read image " + args["image"])
sys.exit(-1)
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# threshold the image, setting all foreground pixels to
# 255 and all background pixels to 0
thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]
# Applying erode filter to remove random noise
erosion_size = 1
element = cv.getStructuringElement(cv.MORPH_RECT, (2 * erosion_size + 1, 2 * erosion_size + 1), (erosion_size, erosion_size) )
thresh = cv.erode(thresh, element)
coords = cv.findNonZero(thresh)
angle = cv.minAreaRect(coords)[-1]
# the `cv.minAreaRect` function returns angles in the
# range [-90, 0); if the angle is less than -45 we add 90 to it
if angle < -45:
angle = (90 + angle)
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv.getRotationMatrix2D(center, angle, 1.0)
rotated = cv.warpAffine(image, M, (w, h), flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)
cv.putText(rotated, "Angle: {:.2f} degrees".format(angle), (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# show the output image
print("[INFO] angle: {:.2f}".format(angle))
cv.imshow("Input", image)
cv.imshow("Rotated", rotated)
cv.waitKey(0)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
'''
MOSSE tracking sample
This sample implements a correlation-based tracking approach described in [1].
Usage:
mosse.py [--pause] [<video source>]
--pause - Start with playback paused at the first video frame.
Useful for tracking target selection.
Draw rectangles around objects with a mouse to track them.
Keys:
SPACE - pause video
c - clear targets
[1] David S. Bolme et al. "Visual Object Tracking using Adaptive Correlation Filters"
http://www.cs.colostate.edu/~draper/papers/bolme_cvpr10.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from common import draw_str, RectSelector
import video
def rnd_warp(a):
h, w = a.shape[:2]
T = np.zeros((2, 3))
coef = 0.2
ang = (np.random.rand()-0.5)*coef
c, s = np.cos(ang), np.sin(ang)
T[:2, :2] = [[c,-s], [s, c]]
T[:2, :2] += (np.random.rand(2, 2) - 0.5)*coef
c = (w/2, h/2)
T[:,2] = c - np.dot(T[:2, :2], c)
return cv.warpAffine(a, T, (w, h), borderMode = cv.BORDER_REFLECT)
def divSpec(A, B):
Ar, Ai = A[...,0], A[...,1]
Br, Bi = B[...,0], B[...,1]
C = (Ar+1j*Ai)/(Br+1j*Bi)
C = np.dstack([np.real(C), np.imag(C)]).copy()
return C
eps = 1e-5
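# Note (illustrative): MOSSE solves for the correlation filter H that maps
# training patches F_i to a Gaussian response G in the Fourier domain,
#   H* = sum_i(G . conj(F_i)) / sum_i(F_i . conj(F_i))   (elementwise),
# accumulated below as H1 (numerator) and H2 (denominator); correlate()
# reports the peak-to-sidelobe ratio (PSR), and tracking is flagged bad when
# the PSR drops below 8.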
class MOSSE:
def __init__(self, frame, rect):
x1, y1, x2, y2 = rect
w, h = map(cv.getOptimalDFTSize, [x2-x1, y2-y1])
x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
self.size = w, h
img = cv.getRectSubPix(frame, (w, h), (x, y))
self.win = cv.createHanningWindow((w, h), cv.CV_32F)
g = np.zeros((h, w), np.float32)
g[h//2, w//2] = 1
g = cv.GaussianBlur(g, (-1, -1), 2.0)
g /= g.max()
self.G = cv.dft(g, flags=cv.DFT_COMPLEX_OUTPUT)
self.H1 = np.zeros_like(self.G)
self.H2 = np.zeros_like(self.G)
for _i in xrange(128):
a = self.preprocess(rnd_warp(img))
A = cv.dft(a, flags=cv.DFT_COMPLEX_OUTPUT)
self.H1 += cv.mulSpectrums(self.G, A, 0, conjB=True)
self.H2 += cv.mulSpectrums( A, A, 0, conjB=True)
self.update_kernel()
self.update(frame)
def update(self, frame, rate = 0.125):
(x, y), (w, h) = self.pos, self.size
self.last_img = img = cv.getRectSubPix(frame, (w, h), (x, y))
img = self.preprocess(img)
self.last_resp, (dx, dy), self.psr = self.correlate(img)
self.good = self.psr > 8.0
if not self.good:
return
self.pos = x+dx, y+dy
self.last_img = img = cv.getRectSubPix(frame, (w, h), self.pos)
img = self.preprocess(img)
A = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
H1 = cv.mulSpectrums(self.G, A, 0, conjB=True)
H2 = cv.mulSpectrums( A, A, 0, conjB=True)
self.H1 = self.H1 * (1.0-rate) + H1 * rate
self.H2 = self.H2 * (1.0-rate) + H2 * rate
self.update_kernel()
@property
def state_vis(self):
f = cv.idft(self.H, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT )
h, w = f.shape
f = np.roll(f, -h//2, 0)
f = np.roll(f, -w//2, 1)
kernel = np.uint8((f-f.min()) / np.ptp(f)*255)  # ndarray.ptp() was removed in NumPy 2.0
resp = self.last_resp
resp = np.uint8(np.clip(resp/resp.max(), 0, 1)*255)
vis = np.hstack([self.last_img, kernel, resp])
return vis
def draw_state(self, vis):
(x, y), (w, h) = self.pos, self.size
x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h)
cv.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255))
if self.good:
cv.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1)
else:
cv.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
cv.line(vis, (x2, y1), (x1, y2), (0, 0, 255))
draw_str(vis, (x1, y2+16), 'PSR: %.2f' % self.psr)
def preprocess(self, img):
img = np.log(np.float32(img)+1.0)
img = (img-img.mean()) / (img.std()+eps)
return img*self.win
def correlate(self, img):
C = cv.mulSpectrums(cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
resp = cv.idft(C, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
h, w = resp.shape
_, mval, _, (mx, my) = cv.minMaxLoc(resp)
side_resp = resp.copy()
cv.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
smean, sstd = side_resp.mean(), side_resp.std()
psr = (mval-smean) / (sstd+eps)
return resp, (mx-w//2, my-h//2), psr
def update_kernel(self):
self.H = divSpec(self.H1, self.H2)
self.H[...,1] *= -1
class App:
def __init__(self, video_src, paused = False):
self.cap = video.create_capture(video_src)
_, self.frame = self.cap.read()
cv.imshow('frame', self.frame)
self.rect_sel = RectSelector('frame', self.onrect)
self.trackers = []
self.paused = paused
def onrect(self, rect):
frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
tracker = MOSSE(frame_gray, rect)
self.trackers.append(tracker)
def run(self):
while True:
if not self.paused:
ret, self.frame = self.cap.read()
if not ret:
break
frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
for tracker in self.trackers:
tracker.update(frame_gray)
vis = self.frame.copy()
for tracker in self.trackers:
tracker.draw_state(vis)
if len(self.trackers) > 0:
cv.imshow('tracker state', self.trackers[-1].state_vis)
self.rect_sel.draw(vis)
cv.imshow('frame', vis)
ch = cv.waitKey(10)
if ch == 27:
break
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.trackers = []
if __name__ == '__main__':
print(__doc__)
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['pause'])
opts = dict(opts)
try:
video_src = args[0]
except IndexError:
video_src = '0'
App(video_src, paused = '--pause' in opts).run()
|
#!/usr/bin/env python
'''
Simple example of stereo image matching and point cloud generation.
Resulting .ply file can be easily viewed using MeshLab ( http://meshlab.sourceforge.net/ )
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
def write_ply(fn, verts, colors):
verts = verts.reshape(-1, 3)
colors = colors.reshape(-1, 3)
verts = np.hstack([verts, colors])
with open(fn, 'wb') as f:
f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')
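# Note (illustrative): reprojectImageTo3D() multiplies each homogeneous pixel
# [x y disp 1]^T by the 4x4 matrix Q built in main(), giving [X Y Z W]^T and
# the 3D point (X/W, Y/W, Z/W); with the hand-built Q below, depth scales as
# f/disp, and the sign flips rotate the cloud 180 degrees around the x-axis
# so the y-axis points up.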
def main():
print('loading images...')
imgL = cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg'))) # downscale images for faster processing
imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg')))
# disparity range is tuned for 'aloe' image pair
window_size = 3
min_disp = 16
num_disp = 112-min_disp
stereo = cv.StereoSGBM_create(minDisparity = min_disp,
numDisparities = num_disp,
blockSize = 16,
P1 = 8*3*window_size**2,
P2 = 32*3*window_size**2,
disp12MaxDiff = 1,
uniquenessRatio = 10,
speckleWindowSize = 100,
speckleRange = 32
)
print('computing disparity...')
disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
print('generating 3d point cloud...',)
h, w = imgL.shape[:2]
f = 0.8*w # guess for focal length
Q = np.float32([[1, 0, 0, -0.5*w],
[0,-1, 0, 0.5*h], # turn points 180 deg around x-axis,
[0, 0, 0, -f], # so that y-axis looks up
[0, 0, 1, 0]])
points = cv.reprojectImageTo3D(disp, Q)
colors = cv.cvtColor(imgL, cv.COLOR_BGR2RGB)
mask = disp > disp.min()
out_points = points[mask]
out_colors = colors[mask]
out_fn = 'out.ply'
write_ply(out_fn, out_points, out_colors)
print('%s saved' % out_fn)
cv.imshow('left', imgL)
cv.imshow('disparity', (disp-min_disp)/num_disp)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Video histogram sample to show live histogram of video
Keys:
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import sys
# local modules
import video
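# Note (illustrative): the 'hist' window shows a 2D hue/saturation histogram;
# each histogram cell is tinted with the actual colour of its (H, S) pair via
# the precomputed hsv_map, and the 'scale' trackbar simply multiplies the
# counts before clipping to [0, 1].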
class App():
def set_scale(self, val):
self.hist_scale = val
def run(self):
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
cv.imshow('hsv_map', hsv_map)
cv.namedWindow('hist', 0)
self.hist_scale = 10
cv.createTrackbar('scale', 'hist', self.hist_scale, 32, self.set_scale)
try:
fn = sys.argv[1]
except IndexError:
fn = 0
cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
while True:
_flag, frame = cam.read()
cv.imshow('camera', frame)
small = cv.pyrDown(frame)
hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
dark = hsv[...,2] < 32
hsv[dark] = 0
h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h*0.005*self.hist_scale, 0, 1)
vis = hsv_map*h[:,:,np.newaxis] / 255.0
cv.imshow('hist', vis)
ch = cv.waitKey(1)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Distance transform sample.
Usage:
distrans.py [<image>]
Keys:
ESC - exit
v - toggle voronoi mode
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from common import make_cmap
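# Note (illustrative): distanceTransformWithLabels returns, per pixel, the L2
# distance to the nearest zero pixel (here a Canny edge, since the edge map is
# inverted) together with the label of that nearest pixel, so colouring by
# label ('v' key) draws the discrete Voronoi diagram of the edge set.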
def main():
import sys
try:
fn = sys.argv[1]
except IndexError:
fn = 'fruits.jpg'
fn = cv.samples.findFile(fn)
img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
if img is None:
print('Failed to load fn:', fn)
sys.exit(1)
cm = make_cmap('jet')
need_update = True
voronoi = False
def update(dummy=None):
global need_update
need_update = False
thrs = cv.getTrackbarPos('threshold', 'distrans')
mark = cv.Canny(img, thrs, 3*thrs)
dist, labels = cv.distanceTransformWithLabels(~mark, cv.DIST_L2, 5)
if voronoi:
vis = cm[np.uint8(labels)]
else:
vis = cm[np.uint8(dist*2)]
vis[mark != 0] = 255
cv.imshow('distrans', vis)
def invalidate(dummy=None):
global need_update
need_update = True
cv.namedWindow('distrans')
cv.createTrackbar('threshold', 'distrans', 60, 255, invalidate)
update()
while True:
ch = cv.waitKey(50)
if ch == 27:
break
if ch == ord('v'):
voronoi = not voronoi
print('showing', ['distance', 'voronoi'][voronoi])
update()
if need_update:
update()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
import numpy as np
import cv2 as cv
import argparse
parser = argparse.ArgumentParser(description='This sample demonstrates the camshift algorithm. \
The example file can be downloaded from: \
https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
parser.add_argument('image', type=str, help='path to image file')
args = parser.parse_args()
cap = cv.VideoCapture(args.image)
# take first frame of the video
ret,frame = cap.read()
# setup initial location of window
x, y, w, h = 300, 200, 100, 50 # simply hardcoded the values
track_window = (x, y, w, h)
# set up the ROI for tracking
roi = frame[y:y+h, x:x+w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
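# Note (illustrative): unlike meanShift, CamShift re-estimates the window
# size and orientation at every step and returns a rotated rectangle, which
# is why the loop below draws it via cv.boxPoints rather than cv.rectangle.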
while(1):
ret, frame = cap.read()
if ret == True:
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply camshift to get the new location
ret, track_window = cv.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv.boxPoints(ret)
pts = np.int32(pts)  # integer corners for drawing (np.int0 was removed in NumPy 2.0)
img2 = cv.polylines(frame,[pts],True, 255,2)
cv.imshow('img2',img2)
k = cv.waitKey(30) & 0xff
if k == 27:
break
else:
break
|
import numpy as np
import cv2 as cv
import argparse
parser = argparse.ArgumentParser(description='This sample demonstrates the meanshift algorithm. \
The example file can be downloaded from: \
https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
parser.add_argument('image', type=str, help='path to image file')
args = parser.parse_args()
cap = cv.VideoCapture(args.image)
# take first frame of the video
ret,frame = cap.read()
# setup initial location of window
x, y, w, h = 300, 200, 100, 50 # simply hardcoded the values
track_window = (x, y, w, h)
# set up the ROI for tracking
roi = frame[y:y+h, x:x+w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret, frame = cap.read()
if ret == True:
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply meanshift to get the new location
ret, track_window = cv.meanShift(dst, track_window, term_crit)
# Draw it on image
x,y,w,h = track_window
img2 = cv.rectangle(frame, (x,y), (x+w,y+h), 255,2)
cv.imshow('img2',img2)
k = cv.waitKey(30) & 0xff
if k == 27:
break
else:
break
|
from __future__ import print_function
import cv2 as cv
import argparse
parser = argparse.ArgumentParser(description='This program shows how to use background subtraction methods provided by \
OpenCV. You can process both videos and images.')
parser.add_argument('--input', type=str, help='Path to a video or a sequence of image.', default='vtest.avi')
parser.add_argument('--algo', type=str, help='Background subtraction method (KNN, MOG2).', default='MOG2')
args = parser.parse_args()
## [create]
#create Background Subtractor objects
if args.algo == 'MOG2':
backSub = cv.createBackgroundSubtractorMOG2()
else:
backSub = cv.createBackgroundSubtractorKNN()
## [create]
## [capture]
capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input))
if not capture.isOpened():
print('Unable to open: ' + args.input)
exit(0)
## [capture]
while True:
ret, frame = capture.read()
if frame is None:
break
## [apply]
#update the background model
fgMask = backSub.apply(frame)
## [apply]
## [display_frame_number]
#get the frame number and write it on the current frame
cv.rectangle(frame, (10, 2), (100,20), (255,255,255), -1)
cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15),
cv.FONT_HERSHEY_SIMPLEX, 0.5 , (0,0,0))
## [display_frame_number]
## [show]
#show the current frame and the fg masks
cv.imshow('Frame', frame)
cv.imshow('FG Mask', fgMask)
## [show]
keyboard = cv.waitKey(30)
if keyboard == ord('q') or keyboard == 27:
break
|
import numpy as np
import cv2 as cv
import argparse
parser = argparse.ArgumentParser(description='This sample demonstrates Lucas-Kanade Optical Flow calculation. \
The example file can be downloaded from: \
https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
parser.add_argument('image', type=str, help='path to image file')
args = parser.parse_args()
cap = cv.VideoCapture(args.image)
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
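# Note (illustrative): winSize is the per-level integration window of the
# pyramidal Lucas-Kanade solver, maxLevel=2 means three pyramid levels are
# used (so proportionally larger motions can be handled), and the criteria
# pair stops the iterative search after 10 iterations or once the update
# falls below 0.03 pixels.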
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while(1):
ret, frame = cap.read()
if not ret:
    print('No frames grabbed!')
    break
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# calculate optical flow
p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
    a, b = new.ravel()
    c, d = old.ravel()
    mask = cv.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
    frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
img = cv.add(frame,mask)
cv.imshow('frame',img)
k = cv.waitKey(30) & 0xff
if k == 27:
break
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
|
import numpy as np
import cv2 as cv
cap = cv.VideoCapture(cv.samples.findFile("vtest.avi"))
ret, frame1 = cap.read()
prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
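# Note (illustrative): the dense flow field is visualized in HSV -- the flow
# direction becomes the hue (angle/2, to fit OpenCV's 0..180 hue range) and
# the flow magnitude, stretched to 0..255, becomes the value channel --
# before converting back to BGR for display.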
while(1):
ret, frame2 = cap.read()
if not ret:
    break
next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)
flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX)
bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR)
cv.imshow('frame2',bgr)
k = cv.waitKey(30) & 0xff
if k == 27:
break
elif k == ord('s'):
cv.imwrite('opticalfb.png',frame2)
cv.imwrite('opticalhsv.png',bgr)
prvs = next
|
from __future__ import print_function
import cv2 as cv
import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
## [Load image]
## [Convert to grayscale]
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [Convert to grayscale]
## [Apply Histogram Equalization]
dst = cv.equalizeHist(src)
## [Apply Histogram Equalization]
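# Note (illustrative): equalizeHist remaps intensities through the normalized
# cumulative histogram, dst(x, y) = round(cdf(src(x, y)) * 255), which spreads
# the gray levels over the full 0..255 range.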
## [Display results]
cv.imshow('Source image', src)
cv.imshow('Equalized Image', dst)
## [Display results]
## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]
|
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
def Hist_and_Backproj(val):
## [initialize]
bins = val
histSize = max(bins, 2)
ranges = [0, 180] # hue_range
## [initialize]
## [Get the Histogram and normalize it]
hist = cv.calcHist([hue], [0], None, [histSize], ranges, accumulate=False)
cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
## [Get the Histogram and normalize it]
## [Get Backprojection]
backproj = cv.calcBackProject([hue], [0], hist, ranges, scale=1)
## [Get Backprojection]
## [Draw the backproj]
cv.imshow('BackProj', backproj)
## [Draw the backproj]
## [Draw the histogram]
w = 400
h = 400
bin_w = int(round(w / histSize))
histImg = np.zeros((h, w, 3), dtype=np.uint8)
for i in range(bins):
cv.rectangle(histImg, (i*bin_w, h), ( (i+1)*bin_w, h - int(round( hist[i]*h/255.0 )) ), (0, 0, 255), cv.FILLED)
cv.imshow('Histogram', histImg)
## [Draw the histogram]
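# Note (illustrative): for every pixel, calcBackProject looks up the
# (normalized) histogram bin of its hue and writes that bin value back, so
# bright regions in 'BackProj' are those whose hue is common in the model
# histogram.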
## [Read the image]
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
## [Read the image]
## [Transform it to HSV]
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
## [Transform it to HSV]
## [Use only the Hue value]
ch = (0, 0)
hue = np.empty(hsv.shape, hsv.dtype)
cv.mixChannels([hsv], [hue], ch)
## [Use only the Hue value]
## [Create Trackbar to enter the number of bins]
window_image = 'Source image'
cv.namedWindow(window_image)
bins = 25
cv.createTrackbar('* Hue bins: ', window_image, bins, 180, Hist_and_Backproj )
Hist_and_Backproj(bins)
## [Create Trackbar to enter the number of bins]
## [Show the image]
cv.imshow(window_image, src)
cv.waitKey()
## [Show the image]
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
low = 20
up = 20
def callback_low(val):
global low
low = val
def callback_up(val):
global up
up = val
def pickPoint(event, x, y, flags, param):
if event != cv.EVENT_LBUTTONDOWN:
return
# Fill and get the mask
seed = (x, y)
newMaskVal = 255
newVal = (120, 120, 120)
connectivity = 8
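# flags pack the connectivity in the low byte and the mask fill value in bits 8-15;
# FLOODFILL_FIXED_RANGE compares pixels to the seed, FLOODFILL_MASK_ONLY leaves src untouched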
flags = connectivity + (newMaskVal << 8 ) + cv.FLOODFILL_FIXED_RANGE + cv.FLOODFILL_MASK_ONLY
mask2 = np.zeros((src.shape[0] + 2, src.shape[1] + 2), dtype=np.uint8)
print('low:', low, 'up:', up)
cv.floodFill(src, mask2, seed, newVal, (low, low, low), (up, up, up), flags)
mask = mask2[1:-1,1:-1]
cv.imshow('Mask', mask)
Hist_and_Backproj(mask)
def Hist_and_Backproj(mask):
h_bins = 30
s_bins = 32
histSize = [h_bins, s_bins]
h_range = [0, 180]
s_range = [0, 256]
ranges = h_range + s_range # Concat list
channels = [0, 1]
# Get the Histogram and normalize it
hist = cv.calcHist([hsv], channels, mask, histSize, ranges, accumulate=False)
cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
# Get Backprojection
backproj = cv.calcBackProject([hsv], channels, hist, ranges, scale=1)
# Draw the backproj
cv.imshow('BackProj', backproj)
# Read the image
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
# Transform it to HSV
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
# Show the image
window_image = 'Source image'
cv.namedWindow(window_image)
cv.imshow(window_image, src)
# Set Trackbars for floodfill thresholds
cv.createTrackbar('Low thresh', window_image, low, 255, callback_low)
cv.createTrackbar('High thresh', window_image, up, 255, callback_up)
# Set a Mouse Callback
cv.setMouseCallback(window_image, pickPoint)
cv.waitKey()
|
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
## [Load image]
## [Separate the image in 3 planes ( B, G and R )]
bgr_planes = cv.split(src)
## [Separate the image in 3 planes ( B, G and R )]
## [Establish the number of bins]
histSize = 256
## [Establish the number of bins]
## [Set the ranges ( for B,G,R )]
histRange = (0, 256) # the upper boundary is exclusive
## [Set the ranges ( for B,G,R )]
## [Set histogram param]
accumulate = False
## [Set histogram param]
## [Compute the histograms]
b_hist = cv.calcHist(bgr_planes, [0], None, [histSize], histRange, accumulate=accumulate)
g_hist = cv.calcHist(bgr_planes, [1], None, [histSize], histRange, accumulate=accumulate)
r_hist = cv.calcHist(bgr_planes, [2], None, [histSize], histRange, accumulate=accumulate)
## [Compute the histograms]
## [Draw the histograms for B, G and R]
hist_w = 512
hist_h = 400
bin_w = int(round( hist_w/histSize ))
histImage = np.zeros((hist_h, hist_w, 3), dtype=np.uint8)
## [Draw the histograms for B, G and R]
## [Normalize the result to ( 0, histImage.rows )]
cv.normalize(b_hist, b_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(g_hist, g_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(r_hist, r_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
## [Normalize the result to ( 0, histImage.rows )]
## [Draw for each channel]
for i in range(1, histSize):
cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(b_hist[i-1])) ),
( bin_w*(i), hist_h - int(round(b_hist[i])) ),
( 255, 0, 0), thickness=2)
cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(g_hist[i-1])) ),
( bin_w*(i), hist_h - int(round(g_hist[i])) ),
( 0, 255, 0), thickness=2)
cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(r_hist[i-1])) ),
( bin_w*(i), hist_h - int(round(r_hist[i])) ),
( 0, 0, 255), thickness=2)
## [Draw for each channel]
## [Display]
cv.imshow('Source image', src)
cv.imshow('calcHist Demo', histImage)
cv.waitKey()
## [Display]
|
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load three images with different environment settings]
parser = argparse.ArgumentParser(description='Code for Histogram Comparison tutorial.')
parser.add_argument('--input1', help='Path to input image 1.')
parser.add_argument('--input2', help='Path to input image 2.')
parser.add_argument('--input3', help='Path to input image 3.')
args = parser.parse_args()
src_base = cv.imread(args.input1)
src_test1 = cv.imread(args.input2)
src_test2 = cv.imread(args.input3)
if src_base is None or src_test1 is None or src_test2 is None:
print('Could not open or find the images!')
exit(0)
## [Load three images with different environment settings]
## [Convert to HSV]
hsv_base = cv.cvtColor(src_base, cv.COLOR_BGR2HSV)
hsv_test1 = cv.cvtColor(src_test1, cv.COLOR_BGR2HSV)
hsv_test2 = cv.cvtColor(src_test2, cv.COLOR_BGR2HSV)
## [Convert to HSV]
## [Convert to HSV half]
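# take only the bottom half of the base image (rows from the middle down)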
hsv_half_down = hsv_base[hsv_base.shape[0]//2:,:]
## [Convert to HSV half]
## [Using 50 bins for hue and 60 for saturation]
h_bins = 50
s_bins = 60
histSize = [h_bins, s_bins]
# hue varies from 0 to 179, saturation from 0 to 255
h_ranges = [0, 180]
s_ranges = [0, 256]
ranges = h_ranges + s_ranges # concat lists
# Use the 0-th and 1-st channels
channels = [0, 1]
## [Using 50 bins for hue and 60 for saturation]
## [Calculate the histograms for the HSV images]
hist_base = cv.calcHist([hsv_base], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_base, hist_base, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_half_down = cv.calcHist([hsv_half_down], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_half_down, hist_half_down, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_test1 = cv.calcHist([hsv_test1], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_test1, hist_test1, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_test2 = cv.calcHist([hsv_test2], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_test2, hist_test2, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
## [Calculate the histograms for the HSV images]
## [Apply the histogram comparison methods]
for compare_method in range(4):
base_base = cv.compareHist(hist_base, hist_base, compare_method)
base_half = cv.compareHist(hist_base, hist_half_down, compare_method)
base_test1 = cv.compareHist(hist_base, hist_test1, compare_method)
base_test2 = cv.compareHist(hist_base, hist_test2, compare_method)
print('Method:', compare_method, 'Perfect, Base-Half, Base-Test(1), Base-Test(2) :',\
base_base, '/', base_half, '/', base_test1, '/', base_test2)
## [Apply the histogram comparison methods]
|
from __future__ import division
import cv2 as cv
import numpy as np
# Snippet code for Operations with images tutorial (not intended to be run)
def load():
# Input/Output
filename = 'img.jpg'
## [Load an image from a file]
img = cv.imread(filename)
## [Load an image from a file]
## [Load an image from a file in grayscale]
img = cv.imread(filename, cv.IMREAD_GRAYSCALE)
## [Load an image from a file in grayscale]
## [Save image]
cv.imwrite(filename, img)
## [Save image]
def access_pixel():
# Accessing pixel intensity values
img = np.empty((4,4,3), np.uint8)
y = 0
x = 0
## [Pixel access 1]
_intensity = img[y,x]
## [Pixel access 1]
## [Pixel access 3]
_blue = img[y,x,0]
_green = img[y,x,1]
_red = img[y,x,2]
## [Pixel access 3]
## [Pixel access 5]
img[y,x] = 128
## [Pixel access 5]
def reference_counting():
# Memory management and reference counting
## [Reference counting 2]
img = cv.imread('image.jpg')
_img1 = np.copy(img)
## [Reference counting 2]
## [Reference counting 3]
img = cv.imread('image.jpg')
_sobelx = cv.Sobel(img, cv.CV_32F, 1, 0)
## [Reference counting 3]
def primitive_operations():
img = np.empty((4,4,3), np.uint8)
## [Set image to black]
img[:] = 0
## [Set image to black]
## [Select ROI]
_smallImg = img[10:110,10:110]
## [Select ROI]
## [BGR to Gray]
img = cv.imread('image.jpg')
_grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
## [BGR to Gray]
src = np.ones((4,4), np.uint8)
## [Convert to CV_32F]
_dst = src.astype(np.float32)
## [Convert to CV_32F]
def visualize_images():
## [imshow 1]
img = cv.imread('image.jpg')
cv.namedWindow('image', cv.WINDOW_AUTOSIZE)
cv.imshow('image', img)
cv.waitKey()
## [imshow 1]
## [imshow 2]
img = cv.imread('image.jpg')
grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
sobelx = cv.Sobel(grey, cv.CV_32F, 1, 0)
# find minimum and maximum intensities
minVal = np.amin(sobelx)
maxVal = np.amax(sobelx)
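# rescale linearly so minVal maps to 0 and maxVal maps to 255 (convertScaleAbs also takes the absolute value)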
draw = cv.convertScaleAbs(sobelx, alpha=255.0/(maxVal - minVal), beta=-minVal * 255.0/(maxVal - minVal))
cv.namedWindow('image', cv.WINDOW_AUTOSIZE)
cv.imshow('image', draw)
cv.waitKey()
## [imshow 2]
|
from __future__ import print_function
import cv2 as cv
alpha = 0.5
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
print(''' Simple Linear Blender
-----------------------
* Enter alpha [0.0-1.0]: ''')
input_alpha = float(raw_input().strip())
if 0 <= input_alpha <= 1:
alpha = input_alpha
# [load]
src1 = cv.imread(cv.samples.findFile('LinuxLogo.jpg'))
src2 = cv.imread(cv.samples.findFile('WindowsLogo.jpg'))
# [load]
if src1 is None:
print("Error loading src1")
exit(-1)
elif src2 is None:
print("Error loading src2")
exit(-1)
# [blend_images]
beta = (1.0 - alpha)
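# dst = alpha*src1 + beta*src2 + gamma (gamma = 0.0 here)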
dst = cv.addWeighted(src1, alpha, src2, beta, 0.0)
# [blend_images]
# [display]
cv.imshow('dst', dst)
cv.waitKey(0)
# [display]
cv.destroyAllWindows()
|
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def help(filename):
print (
'''
{0} shows the usage of the OpenCV serialization functionality.

usage:
python3 {0} outputfile.yml.gz

The output file may be either in XML, YAML or JSON. You can even compress it
by specifying this in its extension like xml.gz yaml.gz etc... With
FileStorage you can serialize objects in OpenCV.

For example: - create a class and have it serialized
- use it to read and write matrices.
'''.format(filename)
)
class MyData:
A = 97
X = np.pi
name = 'mydata1234'
def __repr__(self):
s = '{ name = ' + self.name + ', X = ' + str(self.X)
s = s + ', A = ' + str(self.A) + '}'
return s
## [inside]
def write(self, fs):
fs.write('MyData','{')
fs.write('A', self.A)
fs.write('X', self.X)
fs.write('name', self.name)
fs.write('MyData','}')
def read(self, node):
if (not node.empty()):
self.A = int(node.getNode('A').real())
self.X = node.getNode('X').real()
self.name = node.getNode('name').string()
else:
self.A = self.X = 0
self.name = ''
## [inside]
def main(argv):
if len(argv) != 2:
help(argv[0])
exit(1)
# write
## [iomati]
R = np.eye(3,3)
T = np.zeros((3,1))
## [iomati]
## [customIOi]
m = MyData()
## [customIOi]
filename = argv[1]
## [open]
s = cv.FileStorage(filename, cv.FileStorage_WRITE)
# or:
# s = cv.FileStorage()
# s.open(filename, cv.FileStorage_WRITE)
## [open]
## [writeNum]
s.write('iterationNr', 100)
## [writeNum]
## [writeStr]
s.write('strings', '[')
s.write('image1.jpg','Awesomeness')
s.write('../data/baboon.jpg',']')
## [writeStr]
## [writeMap]
s.write ('Mapping', '{')
s.write ('One', 1)
s.write ('Two', 2)
s.write ('Mapping', '}')
## [writeMap]
## [iomatw]
s.write ('R_MAT', R)
s.write ('T_MAT', T)
## [iomatw]
## [customIOw]
m.write(s)
## [customIOw]
## [close]
s.release()
## [close]
print ('Write Done.')
# read
print ('\nReading: ')
s = cv.FileStorage()
s.open(filename, cv.FileStorage_READ)
## [readNum]
n = s.getNode('iterationNr')
itNr = int(n.real())
## [readNum]
print (itNr)
if (not s.isOpened()):
print ('Failed to open ', filename, file=sys.stderr)
help(argv[0])
exit(1)
## [readStr]
n = s.getNode('strings')
if (not n.isSeq()):
print ('strings is not a sequence! FAIL', file=sys.stderr)
exit(1)
for i in range(n.size()):
print (n.at(i).string())
## [readStr]
## [readMap]
n = s.getNode('Mapping')
print ('Two',int(n.getNode('Two').real()),'; ')
print ('One',int(n.getNode('One').real()),'\n')
## [readMap]
## [iomat]
R = s.getNode('R_MAT').mat()
T = s.getNode('T_MAT').mat()
## [iomat]
## [customIO]
m.read(s.getNode('MyData'))
## [customIO]
print ('\nR =',R)
print ('T =',T,'\n')
print ('MyData =','\n',m,'\n')
## [nonexist]
print ('Attempt to read NonExisting (should initialize the data structure',
'with its default).')
m.read(s.getNode('NonExisting'))
print ('\nNonExisting =','\n',m)
## [nonexist]
print ('\nTip: Open up',filename,'with a text editor to see the serialized data.')
if __name__ == '__main__':
main(sys.argv)
|
from __future__ import print_function
import sys
import time
import numpy as np
import cv2 as cv
## [basic_method]
def is_grayscale(my_image):
return len(my_image.shape) < 3
def saturated(sum_value):
if sum_value > 255:
sum_value = 255
if sum_value < 0:
sum_value = 0
return sum_value
def sharpen(my_image):
if is_grayscale(my_image):
height, width = my_image.shape
else:
# the image is already an 8-bit BGR array, so no conversion is needed
height, width, n_channels = my_image.shape
result = np.zeros(my_image.shape, my_image.dtype)
## [basic_method_loop]
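# each output pixel = 5*center - the 4 cross neighbours (the same sharpening kernel used with filter2D below)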
for j in range(1, height - 1):
for i in range(1, width - 1):
if is_grayscale(my_image):
sum_value = 5 * my_image[j, i] - my_image[j + 1, i] - my_image[j - 1, i] \
- my_image[j, i + 1] - my_image[j, i - 1]
result[j, i] = saturated(sum_value)
else:
for k in range(0, n_channels):
sum_value = 5 * my_image[j, i, k] - my_image[j + 1, i, k] \
- my_image[j - 1, i, k] - my_image[j, i + 1, k]\
- my_image[j, i - 1, k]
result[j, i, k] = saturated(sum_value)
## [basic_method_loop]
return result
## [basic_method]
def main(argv):
filename = 'lena.jpg'
img_codec = cv.IMREAD_COLOR
if argv:
filename = argv[0]
if len(argv) >= 2 and argv[1] == "G":
img_codec = cv.IMREAD_GRAYSCALE
src = cv.imread(cv.samples.findFile(filename), img_codec)
if src is None:
print("Can't open image [" + filename + "]")
print("Usage:")
print("mat_mask_operations.py [image_path -- default lena.jpg] [G -- grayscale]")
return -1
cv.namedWindow("Input", cv.WINDOW_AUTOSIZE)
cv.namedWindow("Output", cv.WINDOW_AUTOSIZE)
cv.imshow("Input", src)
t = time.time()
dst0 = sharpen(src)
t = (time.time() - t)
print("Hand written function time passed in seconds: %s" % t)
cv.imshow("Output", dst0)
cv.waitKey()
t = time.time()
## [kern]
kernel = np.array([[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]], np.float32) # kernel should be floating point type
## [kern]
## [filter2D]
dst1 = cv.filter2D(src, -1, kernel)
# ddepth = -1 means the destination image will have the same depth as the source
## [filter2D]
t = (time.time() - t)
print("Built-in filter2D time passed in seconds: %s" % t)
cv.imshow("Output", dst1)
cv.waitKey(0)
cv.destroyAllWindows()
return 0
if __name__ == "__main__":
main(sys.argv[1:])
|
from __future__ import print_function
import sys
import cv2 as cv
import numpy as np
def print_help():
print('''
This program demonstrates the use of the discrete Fourier transform (DFT).
The DFT of an image is taken and its power spectrum is displayed.
Usage:
discrete_fourier_transform.py [image_name -- default lena.jpg]''')
def main(argv):
print_help()
filename = argv[0] if len(argv) > 0 else 'lena.jpg'
I = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)
if I is None:
print('Error opening image')
return -1
## [expand]
rows, cols = I.shape
m = cv.getOptimalDFTSize( rows )
n = cv.getOptimalDFTSize( cols )
padded = cv.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0])
## [expand]
## [complex_and_real]
planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
complexI = cv.merge(planes) # Add to the expanded another plane with zeros
## [complex_and_real]
## [dft]
cv.dft(complexI, complexI) # this way the result may fit in the source matrix
## [dft]
# compute the magnitude and switch to logarithmic scale
# = > log(1 + sqrt(Re(DFT(I)) ^ 2 + Im(DFT(I)) ^ 2))
## [magnitude]
cv.split(complexI, planes) # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
cv.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude
magI = planes[0]
## [magnitude]
## [log]
matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
cv.add(matOfOnes, magI, magI) # switch to logarithmic scale
cv.log(magI, magI)
## [log]
## [crop_rearrange]
magI_rows, magI_cols = magI.shape
# crop the spectrum, if it has an odd number of rows or columns
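# x & -2 clears the lowest bit, i.e. rounds down to the nearest even number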
magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
cx = int(magI_rows/2)
cy = int(magI_cols/2)
q0 = magI[0:cx, 0:cy] # Top-Left - Create a ROI per quadrant
q1 = magI[cx:cx+cx, 0:cy] # Bottom-Left
q2 = magI[0:cx, cy:cy+cy] # Top-Right
q3 = magI[cx:cx+cx, cy:cy+cy] # Bottom-Right
tmp = np.copy(q0) # swap quadrants (Top-Left with Bottom-Right)
magI[0:cx, 0:cy] = q3
magI[cx:cx + cx, cy:cy + cy] = tmp
tmp = np.copy(q1) # swap quadrant (Bottom-Left with Top-Right)
magI[cx:cx + cx, 0:cy] = q2
magI[0:cx, cy:cy + cy] = tmp
## [crop_rearrange]
## [normalize]
cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX) # Transform the matrix with float values into a
# viewable image form (float values between 0 and 1)
## [normalize]
cv.imshow("Input Image" , I ) # Show the result
cv.imshow("spectrum magnitude", magI)
cv.waitKey()
if __name__ == "__main__":
main(sys.argv[1:])
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 100
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
# Parameters for Shi-Tomasi algorithm
qualityLevel = 0.01
minDistance = 10
blockSize = 3
gradientSize = 3
useHarrisDetector = False
k = 0.04
# Copy the source image
copy = np.copy(src)
# Apply corner detection
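# qualityLevel is relative: corners scoring below qualityLevel * best_corner_response are rejected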
corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)
# Draw corners detected
print('** Number of corners detected:', corners.shape[0])
radius = 4
for i in range(corners.shape[0]):
cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
# Show what you got
cv.namedWindow(source_window)
cv.imshow(source_window, copy)
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='pic3.png')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 23 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
goodFeaturesToTrack_Demo(maxCorners)
cv.waitKey()
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
myHarris_window = 'My Harris corner detector'
myShiTomasi_window = 'My Shi Tomasi corner detector'
myHarris_qualityLevel = 50
myShiTomasi_qualityLevel = 50
max_qualityLevel = 100
rng.seed(12345)
def myHarris_function(val):
myHarris_copy = np.copy(src)
myHarris_qualityLevel = max(val, 1)
for i in range(src_gray.shape[0]):
for j in range(src_gray.shape[1]):
if Mc[i,j] > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel:
cv.circle(myHarris_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
cv.imshow(myHarris_window, myHarris_copy)
def myShiTomasi_function(val):
myShiTomasi_copy = np.copy(src)
myShiTomasi_qualityLevel = max(val, 1)
for i in range(src_gray.shape[0]):
for j in range(src_gray.shape[1]):
if myShiTomasi_dst[i,j] > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel:
cv.circle(myShiTomasi_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
cv.imshow(myShiTomasi_window, myShiTomasi_copy)
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='building.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# Set some parameters
blockSize = 3
apertureSize = 3
# My Harris matrix -- Using cornerEigenValsAndVecs
myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize)
# calculate Mc
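# Harris response: Mc = lambda1*lambda2 - k*(lambda1 + lambda2)^2, with k = 0.04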
Mc = np.empty(src_gray.shape, dtype=np.float32)
for i in range(src_gray.shape[0]):
for j in range(src_gray.shape[1]):
lambda_1 = myHarris_dst[i,j,0]
lambda_2 = myHarris_dst[i,j,1]
Mc[i,j] = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 )
myHarris_minVal, myHarris_maxVal, _, _ = cv.minMaxLoc(Mc)
# Create Window and Trackbar
cv.namedWindow(myHarris_window)
cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function)
myHarris_function(myHarris_qualityLevel)
# My Shi-Tomasi -- Using cornerMinEigenVal
myShiTomasi_dst = cv.cornerMinEigenVal(src_gray, blockSize, apertureSize)
myShiTomasi_minVal, myShiTomasi_maxVal, _, _ = cv.minMaxLoc(myShiTomasi_dst)
# Create Window and Trackbar
cv.namedWindow(myShiTomasi_window)
cv.createTrackbar('Quality Level:', myShiTomasi_window, myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function)
myShiTomasi_function(myShiTomasi_qualityLevel)
cv.waitKey()
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
# Parameters for Shi-Tomasi algorithm
qualityLevel = 0.01
minDistance = 10
blockSize = 3
gradientSize = 3
useHarrisDetector = False
k = 0.04
# Copy the source image
copy = np.copy(src)
# Apply corner detection
corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)
# Draw corners detected
print('** Number of corners detected:', corners.shape[0])
radius = 4
for i in range(corners.shape[0]):
cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
# Show what you got
cv.namedWindow(source_window)
cv.imshow(source_window, copy)
# Set the needed parameters to find the refined corners
winSize = (5, 5)
zeroZone = (-1, -1)
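# stop refining after 40 iterations or once the corner shifts by less than 0.001 px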
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)
# Calculate the refined corner locations
corners = cv.cornerSubPix(src_gray, corners, winSize, zeroZone, criteria)
# Write them down
for i in range(corners.shape[0]):
print(" -- Refined Corner [", i, "] (", corners[i,0,0], ",", corners[i,0,1], ")")
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='pic3.png')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 10 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
goodFeaturesToTrack_Demo(maxCorners)
cv.waitKey()
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
source_window = 'Source image'
corners_window = 'Corners detected'
max_thresh = 255
def cornerHarris_demo(val):
thresh = val
# Detector parameters
blockSize = 2
apertureSize = 3
k = 0.04
# Detecting corners
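# dst holds the Harris response R at every pixel of the grayscale source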
dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k)
# Normalizing
dst_norm = np.empty(dst.shape, dtype=np.float32)
cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
dst_norm_scaled = cv.convertScaleAbs(dst_norm)
# Drawing a circle around corners
for i in range(dst_norm.shape[0]):
for j in range(dst_norm.shape[1]):
if int(dst_norm[i,j]) > thresh:
cv.circle(dst_norm_scaled, (j,i), 5, (0), 2)
# Showing the result
cv.namedWindow(corners_window)
cv.imshow(corners_window, dst_norm_scaled)
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='building.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# Create a window and a trackbar
cv.namedWindow(source_window)
thresh = 200 # initial threshold
cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo)
cv.imshow(source_window, src)
cornerHarris_demo(thresh)
cv.waitKey()
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
morph_size = 0
max_operator = 4
max_elem = 2
max_kernel_size = 21
title_trackbar_operator_type = 'Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat'
title_trackbar_element_type = 'Element:\n 0: Rect - 1: Cross - 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n + 1'
title_window = 'Morphology Transformations Demo'
morph_op_dic = {0: cv.MORPH_OPEN, 1: cv.MORPH_CLOSE, 2: cv.MORPH_GRADIENT, 3: cv.MORPH_TOPHAT, 4: cv.MORPH_BLACKHAT}
def morphology_operations(val):
morph_operator = cv.getTrackbarPos(title_trackbar_operator_type, title_window)
morph_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_window)
morph_elem = 0
val_type = cv.getTrackbarPos(title_trackbar_element_type, title_window)
if val_type == 0:
morph_elem = cv.MORPH_RECT
elif val_type == 1:
morph_elem = cv.MORPH_CROSS
elif val_type == 2:
morph_elem = cv.MORPH_ELLIPSE
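# build a (2*morph_size+1) x (2*morph_size+1) kernel anchored at its centre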
element = cv.getStructuringElement(morph_elem, (2*morph_size + 1, 2*morph_size+1), (morph_size, morph_size))
operation = morph_op_dic[morph_operator]
dst = cv.morphologyEx(src, operation, element)
cv.imshow(title_window, dst)
parser = argparse.ArgumentParser(description='Code for More Morphology Transformations tutorial.')
parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image: ', args.input)
exit(0)
cv.namedWindow(title_window)
cv.createTrackbar(title_trackbar_operator_type, title_window , 0, max_operator, morphology_operations)
cv.createTrackbar(title_trackbar_element_type, title_window , 0, max_elem, morphology_operations)
cv.createTrackbar(title_trackbar_kernel_size, title_window , 0, max_kernel_size, morphology_operations)
morphology_operations(0)
cv.waitKey()
|
import cv2 as cv
import numpy as np
img = cv.imread(cv.samples.findFile('sudoku.png'))
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray,50,150,apertureSize = 3)
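# rho resolution: 1 px, theta resolution: 1 degree, accumulator threshold: 200 votes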
lines = cv.HoughLines(edges,1,np.pi/180,200)
for line in lines:
rho,theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv.imwrite('houghlines3.jpg',img)
|
import cv2 as cv
import numpy as np
img = cv.imread(cv.samples.findFile('sudoku.png'))
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray,50,150,apertureSize = 3)
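# probabilistic transform: threshold=100 votes; segments shorter than minLineLength are rejected and gaps up to maxLineGap px are bridged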
lines = cv.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
for line in lines:
x1,y1,x2,y2 = line[0]
cv.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv.imwrite('houghlines5.jpg',img)
|
from __future__ import print_function
import sys
import cv2 as cv
## [global_variables]
use_mask = False
img = None
templ = None
mask = None
image_window = "Source Image"
result_window = "Result window"
match_method = 0
max_Trackbar = 5
## [global_variables]
def main(argv):
if (len(sys.argv) < 3):
print('Not enough parameters')
print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]')
return -1
## [load_image]
global img
global templ
img = cv.imread(sys.argv[1], cv.IMREAD_COLOR)
templ = cv.imread(sys.argv[2], cv.IMREAD_COLOR)
if (len(sys.argv) > 3):
global use_mask
use_mask = True
global mask
mask = cv.imread( sys.argv[3], cv.IMREAD_COLOR )
if ((img is None) or (templ is None) or (use_mask and (mask is None))):
print('Can\'t read one of the images')
return -1
## [load_image]
## [create_windows]
cv.namedWindow( image_window, cv.WINDOW_AUTOSIZE )
cv.namedWindow( result_window, cv.WINDOW_AUTOSIZE )
## [create_windows]
## [create_trackbar]
trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED'
cv.createTrackbar( trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod )
## [create_trackbar]
MatchingMethod(match_method)
## [wait_key]
cv.waitKey(0)
return 0
## [wait_key]
def MatchingMethod(param):
global match_method
match_method = param
## [copy_source]
img_display = img.copy()
## [copy_source]
## [match_template]
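# per the matchTemplate docs, only TM_SQDIFF and TM_CCORR_NORMED support a mask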
method_accepts_mask = (cv.TM_SQDIFF == match_method or match_method == cv.TM_CCORR_NORMED)
if (use_mask and method_accepts_mask):
result = cv.matchTemplate(img, templ, match_method, None, mask)
else:
result = cv.matchTemplate(img, templ, match_method)
## [match_template]
## [normalize]
cv.normalize( result, result, 0, 1, cv.NORM_MINMAX, -1 )
## [normalize]
## [best_match]
_minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
## [best_match]
## [match_loc]
if (match_method == cv.TM_SQDIFF or match_method == cv.TM_SQDIFF_NORMED):
matchLoc = minLoc
else:
matchLoc = maxLoc
## [match_loc]
## [imshow]
# templ.shape[1] is the template width (x extent), templ.shape[0] its height (y extent)
cv.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv.imshow(image_window, img_display)
cv.imshow(result_window, result)
## [imshow]
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
import sys
import cv2 as cv
import numpy as np
# Global Variables
DELAY_CAPTION = 1500
DELAY_BLUR = 100
MAX_KERNEL_LENGTH = 31
src = None
dst = None
window_name = 'Smoothing Demo'
def main(argv):
cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE)
# Load the source image
imageName = argv[0] if len(argv) > 0 else 'lena.jpg'
global src
src = cv.imread(cv.samples.findFile(imageName))
if src is None:
print ('Error opening image')
print ('Usage: smoothing.py [image_name -- default lena.jpg] \n')
return -1
if display_caption('Original Image') != 0:
return 0
global dst
dst = np.copy(src)
if display_dst(DELAY_CAPTION) != 0:
return 0
# Applying Homogeneous blur
if display_caption('Homogeneous Blur') != 0:
return 0
## [blur]
for i in range(1, MAX_KERNEL_LENGTH, 2):
dst = cv.blur(src, (i, i))
if display_dst(DELAY_BLUR) != 0:
return 0
## [blur]
# Applying Gaussian blur
if display_caption('Gaussian Blur') != 0:
return 0
## [gaussianblur]
for i in range(1, MAX_KERNEL_LENGTH, 2):
dst = cv.GaussianBlur(src, (i, i), 0)
if display_dst(DELAY_BLUR) != 0:
return 0
## [gaussianblur]
# Applying Median blur
if display_caption('Median Blur') != 0:
return 0
## [medianblur]
for i in range(1, MAX_KERNEL_LENGTH, 2):
dst = cv.medianBlur(src, i)
if display_dst(DELAY_BLUR) != 0:
return 0
## [medianblur]
# Applying Bilateral Filter
if display_caption('Bilateral Blur') != 0:
return 0
## [bilateralfilter]
# Remember, bilateral filtering is a bit slow, so larger kernel values take noticeably longer
for i in range(1, MAX_KERNEL_LENGTH, 2):
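# bilateralFilter args: d = i (pixel neighbourhood diameter), sigmaColor = i*2, sigmaSpace = i/2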
dst = cv.bilateralFilter(src, i, i * 2, i / 2)
if display_dst(DELAY_BLUR) != 0:
return 0
## [bilateralfilter]
# Done
display_caption('Done!')
return 0
def display_caption(caption):
global dst
dst = np.zeros(src.shape, src.dtype)
rows, cols, _ch = src.shape
cv.putText(dst, caption,
(int(cols / 4), int(rows / 2)),
cv.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255))
return display_dst(DELAY_CAPTION)
def display_dst(delay):
cv.imshow(window_name, dst)
c = cv.waitKey(delay)
if c >= 0: return -1
return 0
if __name__ == "__main__":
main(sys.argv[1:])
|
import cv2 as cv
import numpy as np
input_image = np.array((
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 255, 255, 255, 0, 0, 0, 255],
[0, 255, 255, 255, 0, 0, 0, 0],
[0, 255, 255, 255, 0, 255, 0, 0],
[0, 0, 255, 0, 0, 0, 0, 0],
[0, 0, 255, 0, 0, 255, 255, 0],
[0,255, 0, 255, 0, 0, 255, 0],
[0, 255, 255, 255, 0, 0, 0, 0]), dtype="uint8")
kernel = np.array((
[0, 1, 0],
[1, -1, 1],
[0, 1, 0]), dtype="int")
output_image = cv.morphologyEx(input_image, cv.MORPH_HITMISS, kernel)
rate = 50
kernel = (kernel + 1) * 127
kernel = np.uint8(kernel)
kernel = cv.resize(kernel, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow("kernel", kernel)
cv.moveWindow("kernel", 0, 0)
input_image = cv.resize(input_image, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow("Original", input_image)
cv.moveWindow("Original", 0, 200)
output_image = cv.resize(output_image, None , fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow("Hit or Miss", output_image)
cv.moveWindow("Hit or Miss", 500, 200)
cv.waitKey(0)
cv.destroyAllWindows()
|
from __future__ import print_function
import cv2 as cv
import argparse
max_value = 255
max_value_H = 360//2
low_H = 0
low_S = 0
low_V = 0
high_H = max_value_H
high_S = max_value
high_V = max_value
window_capture_name = 'Video Capture'
window_detection_name = 'Object Detection'
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
## [low]
def on_low_H_thresh_trackbar(val):
global low_H
global high_H
low_H = val
low_H = min(high_H-1, low_H)
cv.setTrackbarPos(low_H_name, window_detection_name, low_H)
## [low]
## [high]
def on_high_H_thresh_trackbar(val):
global low_H
global high_H
high_H = val
high_H = max(high_H, low_H+1)
cv.setTrackbarPos(high_H_name, window_detection_name, high_H)
## [high]
def on_low_S_thresh_trackbar(val):
global low_S
global high_S
low_S = val
low_S = min(high_S-1, low_S)
cv.setTrackbarPos(low_S_name, window_detection_name, low_S)
def on_high_S_thresh_trackbar(val):
global low_S
global high_S
high_S = val
high_S = max(high_S, low_S+1)
cv.setTrackbarPos(high_S_name, window_detection_name, high_S)
def on_low_V_thresh_trackbar(val):
global low_V
global high_V
low_V = val
low_V = min(high_V-1, low_V)
cv.setTrackbarPos(low_V_name, window_detection_name, low_V)
def on_high_V_thresh_trackbar(val):
global low_V
global high_V
high_V = val
high_V = max(high_V, low_V+1)
cv.setTrackbarPos(high_V_name, window_detection_name, high_V)
parser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')
parser.add_argument('--camera', help='Camera device number.', default=0, type=int)
args = parser.parse_args()
## [cap]
cap = cv.VideoCapture(args.camera)
## [cap]
## [window]
cv.namedWindow(window_capture_name)
cv.namedWindow(window_detection_name)
## [window]
## [trackbar]
cv.createTrackbar(low_H_name, window_detection_name , low_H, max_value_H, on_low_H_thresh_trackbar)
cv.createTrackbar(high_H_name, window_detection_name , high_H, max_value_H, on_high_H_thresh_trackbar)
cv.createTrackbar(low_S_name, window_detection_name , low_S, max_value, on_low_S_thresh_trackbar)
cv.createTrackbar(high_S_name, window_detection_name , high_S, max_value, on_high_S_thresh_trackbar)
cv.createTrackbar(low_V_name, window_detection_name , low_V, max_value, on_low_V_thresh_trackbar)
cv.createTrackbar(high_V_name, window_detection_name , high_V, max_value, on_high_V_thresh_trackbar)
## [trackbar]
while True:
## [while]
ret, frame = cap.read()
if frame is None:
break
frame_HSV = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
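# inRange sets a pixel to 255 only if all three HSV channels fall inside [low, high]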
frame_threshold = cv.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V))
## [while]
## [show]
cv.imshow(window_capture_name, frame)
cv.imshow(window_detection_name, frame_threshold)
## [show]
key = cv.waitKey(30)
if key == ord('q') or key == 27:
break
|
from __future__ import print_function
import cv2 as cv
import argparse
max_value = 255
max_type = 4
max_binary_value = 255
trackbar_type = 'Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted'
trackbar_value = 'Value'
window_name = 'Threshold Demo'
## [Threshold_Demo]
def Threshold_Demo(val):
#0: Binary
#1: Binary Inverted
#2: Threshold Truncated
#3: Threshold to Zero
#4: Threshold to Zero Inverted
threshold_type = cv.getTrackbarPos(trackbar_type, window_name)
threshold_value = cv.getTrackbarPos(trackbar_value, window_name)
_, dst = cv.threshold(src_gray, threshold_value, max_binary_value, threshold_type )
cv.imshow(window_name, dst)
## [Threshold_Demo]
parser = argparse.ArgumentParser(description='Code for Basic Thresholding Operations tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()
## [load]
# Load an image
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image: ', args.input)
exit(0)
# Convert the image to Gray
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [load]
## [window]
# Create a window to display results
cv.namedWindow(window_name)
## [window]
## [trackbar]
# Create Trackbar to choose type of Threshold
cv.createTrackbar(trackbar_type, window_name , 3, max_type, Threshold_Demo)
# Create Trackbar to choose Threshold value
cv.createTrackbar(trackbar_value, window_name , 0, max_value, Threshold_Demo)
## [trackbar]
# Call the function to initialize
Threshold_Demo(0)
# Wait until user finishes program
cv.waitKey()
|
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
alpha = 1.0
alpha_max = 500
beta = 0
beta_max = 200
gamma = 1.0
gamma_max = 200
def basicLinearTransform():
res = cv.convertScaleAbs(img_original, alpha=alpha, beta=beta)
img_corrected = cv.hconcat([img_original, res])
cv.imshow("Brightness and contrast adjustments", img_corrected)
def gammaCorrection():
## [changing-contrast-brightness-gamma-correction]
lookUpTable = np.empty((1,256), np.uint8)
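# build a LUT implementing O = (I/255)^gamma * 255 for every 8-bit intensity I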
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
res = cv.LUT(img_original, lookUpTable)
## [changing-contrast-brightness-gamma-correction]
img_gamma_corrected = cv.hconcat([img_original, res])
cv.imshow("Gamma correction", img_gamma_corrected)
def on_linear_transform_alpha_trackbar(val):
global alpha
alpha = val / 100
basicLinearTransform()
def on_linear_transform_beta_trackbar(val):
global beta
beta = val - 100
basicLinearTransform()
def on_gamma_correction_trackbar(val):
global gamma
gamma = val / 100
gammaCorrection()
parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
img_original = cv.imread(cv.samples.findFile(args.input))
if img_original is None:
print('Could not open or find the image: ', args.input)
exit(0)
img_corrected = np.empty((img_original.shape[0], img_original.shape[1]*2, img_original.shape[2]), img_original.dtype)
img_gamma_corrected = np.empty((img_original.shape[0], img_original.shape[1]*2, img_original.shape[2]), img_original.dtype)
img_corrected = cv.hconcat([img_original, img_original])
img_gamma_corrected = cv.hconcat([img_original, img_original])
cv.namedWindow('Brightness and contrast adjustments')
cv.namedWindow('Gamma correction')
alpha_init = int(alpha *100)
cv.createTrackbar('Alpha gain (contrast)', 'Brightness and contrast adjustments', alpha_init, alpha_max, on_linear_transform_alpha_trackbar)
beta_init = beta + 100
cv.createTrackbar('Beta bias (brightness)', 'Brightness and contrast adjustments', beta_init, beta_max, on_linear_transform_beta_trackbar)
gamma_init = int(gamma * 100)
cv.createTrackbar('Gamma correction', 'Gamma correction', gamma_init, gamma_max, on_gamma_correction_trackbar)
on_linear_transform_alpha_trackbar(alpha_init)
on_gamma_correction_trackbar(gamma_init)
cv.waitKey()
|
from __future__ import print_function
from builtins import input
import cv2 as cv
import numpy as np
import argparse
# Read image given by user
## [basic-linear-transform-load]
parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
image = cv.imread(cv.samples.findFile(args.input))
if image is None:
print('Could not open or find the image: ', args.input)
exit(0)
## [basic-linear-transform-load]
## [basic-linear-transform-output]
new_image = np.zeros(image.shape, image.dtype)
## [basic-linear-transform-output]
## [basic-linear-transform-parameters]
alpha = 1.0 # Simple contrast control
beta = 0 # Simple brightness control
# Initialize values
print(' Basic Linear Transforms ')
print('-------------------------')
try:
alpha = float(input('* Enter the alpha value [1.0-3.0]: '))
beta = int(input('* Enter the beta value [0-100]: '))
except ValueError:
print('Error, not a number')
## [basic-linear-transform-parameters]
# Do the operation new_image(i,j) = alpha*image(i,j) + beta
# Instead of these 'for' loops we could have used simply:
# new_image = cv.convertScaleAbs(image, alpha=alpha, beta=beta)
# but we wanted to show you how to access the pixels :)
## [basic-linear-transform-operation]
for y in range(image.shape[0]):
for x in range(image.shape[1]):
for c in range(image.shape[2]):
new_image[y,x,c] = np.clip(alpha*image[y,x,c] + beta, 0, 255)
## [basic-linear-transform-operation]
## [basic-linear-transform-display]
# Show stuff
cv.imshow('Original Image', image)
cv.imshow('New Image', new_image)
# Wait until user press some key
cv.waitKey()
## [basic-linear-transform-display]
|