diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index d08e15e7..5b2f0ff4 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -3,7 +3,7 @@ name: Lint Check
on: [push, pull_request]
jobs:
- mypy:
+ lint:
runs-on: ubuntu-latest
steps:
@@ -17,8 +17,12 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install mypy
+ pip install mypy black pycodestyle pydocstyle
- - name: Run mypy
+ - name: Run linters
run: |
mypy guibot
+ black --check --diff --color guibot
+          # the only excluded checks are ones that conflict with black or within pycodestyle itself
+ pycodestyle --ignore=E203,E501,W503 guibot
+ pydocstyle guibot
diff --git a/guibot/__init__.py b/guibot/__init__.py
index 1219a921..15d7fc1a 100644
--- a/guibot/__init__.py
+++ b/guibot/__init__.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Package with the complete guibot modules and functionality.
SUMMARY
------------------------------------------------------
-Package with the complete guibot modules and functionality.
INTERFACE
diff --git a/guibot/calibrator.py b/guibot/calibrator.py
index 60181153..529b432f 100644
--- a/guibot/calibrator.py
+++ b/guibot/calibrator.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Calibration and benchmarking for all CV backends on a given matching target.
SUMMARY
------------------------------------------------------
-Calibration and benchmarking for all CV backends on a given matching target.
INTERFACE
@@ -29,6 +29,7 @@
import math
import copy
from typing import Generator
+import logging
from .finder import *
from .target import Target, Image
@@ -36,21 +37,23 @@
from .errors import *
from .location import Location
-import logging
-log = logging.getLogger('guibot.calibrator')
+
+log = logging.getLogger("guibot.calibrator")
#: explicit blacklist of backend combinations to skip for benchmarking
-benchmark_blacklist = [("mixed", "normal", "mixed", "east", "hmm", "adaptive", "adaptive"),
- ("mixed", "adaptive", "mixed", "east", "hmm", "adaptive", "adaptive"),
- ("mixed", "canny", "mixed", "east", "hmm", "adaptive", "adaptive")]
+benchmark_blacklist = [
+ ("mixed", "normal", "mixed", "east", "hmm", "adaptive", "adaptive"),
+ ("mixed", "adaptive", "mixed", "east", "hmm", "adaptive", "adaptive"),
+ ("mixed", "canny", "mixed", "east", "hmm", "adaptive", "adaptive"),
+]
class Calibrator(object):
"""
- Provides with a group of methods to facilitate and automate the selection
- of algorithms and parameters that are most suitable for a given preselected
- image matching pair.
+    Provide a group of methods to facilitate and automate the selection of algorithms and parameters.
+
+    The selected algorithms and parameters are the ones most suitable for a given preselected image matching pair.
Use the benchmarking method to choose the best algorithm to find your image.
Use the calibration method to find the best parameters if you have already
@@ -58,8 +61,9 @@ class Calibrator(object):
multiple random starts from a uniform or normal probability distribution.
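+
+    A minimal usage sketch (assuming ``Image`` and ``TemplateFinder`` are imported;
+    the finder choice and file names are only illustrative)::
+
+        calibrator = Calibrator(Image("needle.png"), Image("haystack.png"))
+        results = calibrator.benchmark(TemplateFinder())
+        best_method, best_similarity, duration = results[0]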
"""
- def __init__(self, needle: Target = None, haystack: Image = None,
- config: str = None) -> None:
+ def __init__(
+ self, needle: Target = None, haystack: Image = None, config: str = None
+ ) -> None:
"""
Build a calibrator object for a given match case.
@@ -79,21 +83,32 @@ def __init__(self, needle: Target = None, haystack: Image = None,
haystack = Target.from_data_file(haystack)
maximize = maximize == "max"
self.cases.append((needle, haystack, maximize))
- log.info("Registering match case with needle %s and haystack %s for %s",
- needle, haystack, "maximizing" if maximize else "minimizing")
+ log.info(
+ "Registering match case with needle %s and haystack %s for %s",
+ needle,
+ haystack,
+ "maximizing" if maximize else "minimizing",
+ )
else:
- raise ValueError("Need at least a single needle/haystack for calibration"
- " or a config file for more than one match case")
+ raise ValueError(
+ "Need at least a single needle/haystack for calibration"
+ " or a config file for more than one match case"
+ )
# this attribute can be changed to use different run function
self.run = self.run_default
- def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = False,
- calibration: bool = False, max_attempts: int = 3,
- **kwargs: dict[str, type]) -> list[tuple[str, float, float]]:
+ def benchmark(
+ self,
+ finder: Finder,
+ random_starts: int = 0,
+ uniform: bool = False,
+ calibration: bool = False,
+ max_attempts: int = 3,
+ **kwargs: dict[str, type]
+ ) -> list[tuple[str, float, float]]:
"""
- Perform benchmarking on all available algorithms of a finder
- for a given needle and haystack.
+ Perform benchmarking on all available algorithms of a finder for a given needle and haystack.
:param finder: CV backend whose backend algorithms will be benchmarked
:param random_starts: number of random starts to try with (0 for nonrandom)
@@ -109,8 +124,10 @@ def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = Fals
for a given `needle` and `haystack`.
"""
results = []
- log.info("Performing benchmarking %s calibration",
- "with" if calibration else "without")
+ log.info(
+ "Performing benchmarking %s calibration",
+ "with" if calibration else "without",
+ )
# block logging since we need all its info after the matching finishes
ImageLogger.accumulate_logging = True
@@ -121,7 +138,9 @@ def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = Fals
ordered_categories.remove("find")
# test all matching methods of the current finder
- def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[str, ...], None, None]:
+ def backend_tuples(
+ category_list: list[str], finder: Finder
+ ) -> Generator[tuple[str, ...], None, None]:
if len(category_list) == 0:
yield ()
else:
@@ -130,6 +149,7 @@ def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[
for backend in backends:
for z in backend_tuples(category_list[1:], finder):
yield (backend,) + z
+
for backend_tuple in backend_tuples(ordered_categories, finder):
if backend_tuple in benchmark_blacklist:
log.warning("Skipping blacklisted benchmarked backend combination")
@@ -138,33 +158,56 @@ def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[
log.info("Benchmark testing with %s", method)
for backend, category in zip(backend_tuple, ordered_categories):
- finder.configure_backend(backend=backend, category=category, reset=False)
+ finder.configure_backend(
+ backend=backend, category=category, reset=False
+ )
finder.can_calibrate(category, calibration)
try:
- finder.synchronize_backend(backend=backend, category=category, reset=False)
+ finder.synchronize_backend(
+ backend=backend, category=category, reset=False
+ )
except UnsupportedBackendError as error:
- log.debug("Skipping synchronization for %s/backend=%s", category, backend)
+ log.debug(
+ "Skipping synchronization for %s/backend=%s", category, backend
+ )
if random_starts > 0:
- self.search(finder, random_starts=random_starts, uniform=uniform,
- calibration=calibration, max_attempts=max_attempts, **kwargs)
+ self.search(
+ finder,
+ random_starts=random_starts,
+ uniform=uniform,
+ calibration=calibration,
+ max_attempts=max_attempts,
+ **kwargs
+ )
elif calibration:
self.calibrate(finder, max_attempts=max_attempts, **kwargs)
start_time = time.time()
similarity = 1.0 - self.run(finder, **kwargs)
total_time = time.time() - start_time
- log.debug("Obtained similarity %s from %s in %ss", similarity, method, total_time)
+ log.debug(
+ "Obtained similarity %s from %s in %ss", similarity, method, total_time
+ )
results.append((method, similarity, total_time))
ImageLogger.accumulate_logging = False
return sorted(results, key=lambda x: x[1], reverse=True)
- def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False,
- calibration: bool = True, max_attempts: int = 3, **kwargs: dict[str, type]) -> float:
+ def search(
+ self,
+ finder: Finder,
+ random_starts: int = 1,
+ uniform: bool = False,
+ calibration: bool = True,
+ max_attempts: int = 3,
+ **kwargs: dict[str, type]
+ ) -> float:
"""
- Search for the best match configuration for a given needle and haystack
- using calibration from random initial conditions.
+        Search for the best match configuration via calibration from random initial conditions.
+
+ Find the best match configuration for a given needle and haystack using
+ calibration from random initial conditions.
:param finder: CV backend to use in order to determine deltas, fixed, and free
parameters and ultimately tweak to minimize error
@@ -185,7 +228,9 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False,
best_error = self.run(finder, **kwargs)
best_params = init_params = finder.params
for i in range(random_starts):
- log.info("Random run %s\\%s, best error %s", i+1, random_starts, best_error)
+ log.info(
+ "Random run %s\\%s, best error %s", i + 1, random_starts, best_error
+ )
params = copy.deepcopy(init_params)
for category in params.keys():
@@ -197,20 +242,33 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False,
mean = None if uniform else param.value
deviation = None if uniform else param.delta
param.value = param.random_value(mean, deviation)
- log.debug("Setting %s/%s to random value=%s", category, key, param.value)
+ log.debug(
+ "Setting %s/%s to random value=%s",
+ category,
+ key,
+ param.value,
+ )
finder.params = params
if calibration:
- error = 1.0 - self.calibrate(finder, max_attempts=max_attempts, **kwargs)
+ error = 1.0 - self.calibrate(
+ finder, max_attempts=max_attempts, **kwargs
+ )
else:
error = self.run(finder, **kwargs)
if error < best_error:
- log.info("Random start ended with smaller error %s < %s", error, best_error)
+ log.info(
+ "Random start ended with smaller error %s < %s", error, best_error
+ )
best_error = error
best_params = params
else:
- log.debug("Random start did not end with smaller error %s >= %s", error, best_error)
+ log.debug(
+ "Random start did not end with smaller error %s >= %s",
+ error,
+ best_error,
+ )
ImageLogger.accumulate_logging = False
log.info("Best error for all random starts is %s", best_error)
@@ -220,14 +278,23 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False,
for key in finder.params[category].keys():
param = finder.params[category][key]
if hasattr(param, "value"):
- log.log(9, "\t%s/%s with value %s +/- delta of %s",
- category, key, param.value, param.delta)
+ log.log(
+ 9,
+ "\t%s/%s with value %s +/- delta of %s",
+ category,
+ key,
+ param.value,
+ param.delta,
+ )
return 1.0 - best_error
- def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, type]) -> float:
+ def calibrate(
+ self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, type]
+ ) -> float:
"""
- Calibrate the available match configuration for a given needle
- and haystack minimizing the matchign error.
+ Calibrate the available match configuration for a given needle and haystack.
+
+ The calibration minimizes the matching error.
:param finder: configuration for the CV backend to calibrate
:param max_attempts: maximal number of refinements to reach
@@ -256,7 +323,7 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
log.log(9, "Calibration start with error=%s", best_error)
for n in range(max_attempts):
- log.info("Try %s\\%s, best error %s", n+1, max_attempts, best_error)
+ log.info("Try %s\\%s, best error %s", n + 1, max_attempts, best_error)
if best_error == 0.0:
log.info("Exiting due to zero error")
@@ -269,17 +336,30 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
if key == "backend":
continue
elif not isinstance(param, CVParameter):
- log.warning("The parameter %s/%s is not a CV parameter!", category, key)
+ log.warning(
+ "The parameter %s/%s is not a CV parameter!", category, key
+ )
continue
elif param.fixed:
log.log(9, "Skip fixed parameter: %s/%s", category, key)
continue
elif isinstance(param.value, str):
- log.log(9, "Skip string parameter: %s/%s (calibration not supported)", category, key)
+ log.log(
+ 9,
+ "Skip string parameter: %s/%s (calibration not supported)",
+ category,
+ key,
+ )
continue
elif param.delta < param.tolerance:
- log.log(9, "The parameter %s/%s has slowed down to %s below tolerance %s",
- category, key, param.delta, param.tolerance)
+ log.log(
+ 9,
+ "The parameter %s/%s has slowed down to %s below tolerance %s",
+ category,
+ key,
+ param.delta,
+ param.tolerance,
+ )
continue
else:
slowdown_flag = False
@@ -288,15 +368,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
# add the delta to the current parameter
if isinstance(param.value, float):
if param.range[1] is not None:
- param.value = min(float(start_value) + param.delta,
- param.range[1])
+ param.value = min(
+ float(start_value) + param.delta, param.range[1]
+ )
else:
param.value = float(start_value) + param.delta
elif isinstance(param.value, int) and not param.enumerated:
intdelta = int(math.ceil(param.delta))
if param.range[1] is not None:
- param.value = min(int(start_value) + intdelta,
- param.range[1])
+ param.value = min(
+ int(start_value) + intdelta, param.range[1]
+ )
else:
param.value = int(start_value) + intdelta
# remaining types require special handling
@@ -307,8 +389,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
continue
param.value = mode
error = self.run(finder, **kwargs)
- log.log(9, "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", category, key,
- start_value, param.value, param.delta, error, best_error)
+ log.log(
+ 9,
+ "%s/%s: %s +> %s (delta: %s) = %s (best: %s)",
+ category,
+ key,
+ start_value,
+ param.value,
+ param.delta,
+ error,
+ best_error,
+ )
if error < best_error:
best_error = error
param.value = mode
@@ -322,12 +413,25 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
else:
param.value = True
else:
- raise ValueError("Parameter %s/%s is of unsupported type %s",
- category, key, type(param.value))
+                            raise ValueError(
+                                "Parameter %s/%s is of unsupported type %s"
+                                % (category, key, type(param.value))
+                            )
error = self.run(finder, **kwargs)
- log.log(9, "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", category, key,
- start_value, param.value, param.delta, error, best_error)
+ log.log(
+ 9,
+ "%s/%s: %s +> %s (delta: %s) = %s (best: %s)",
+ category,
+ key,
+ start_value,
+ param.value,
+ param.delta,
+ error,
+ best_error,
+ )
if error < best_error:
best_error = error
param.delta *= 1.1
@@ -336,15 +440,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
if isinstance(param.value, float):
if param.range[0] is not None:
- param.value = max(float(start_value) - param.delta,
- param.range[0])
+ param.value = max(
+ float(start_value) - param.delta, param.range[0]
+ )
else:
param.value = float(start_value) - param.delta
elif isinstance(param.value, int):
intdelta = int(math.floor(param.delta))
if param.range[0] is not None:
- param.value = max(int(start_value) - intdelta,
- param.range[0])
+ param.value = max(
+ int(start_value) - intdelta, param.range[0]
+ )
else:
param.value = int(start_value) - intdelta
elif isinstance(param.value, bool):
@@ -353,8 +459,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
continue
error = self.run(finder, **kwargs)
- log.log(9, "%s/%s: %s -> %s (delta: %s) = %s (best: %s)", category, key,
- start_value, param.value, param.delta, error, best_error)
+ log.log(
+ 9,
+ "%s/%s: %s -> %s (delta: %s) = %s (best: %s)",
+ category,
+ key,
+ start_value,
+ param.value,
+ param.delta,
+ error,
+ best_error,
+ )
if error < best_error:
best_error = error
param.delta *= 1.1
@@ -381,8 +496,14 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t
delattr(param, "max_delta")
elif param.fixed:
param.delta = 0.0
- log.log(9, "\t%s/%s with value %s +/- delta of %s",
- category, key, param.value, param.delta)
+ log.log(
+ 9,
+ "\t%s/%s with value %s +/- delta of %s",
+ category,
+ key,
+ param.value,
+ param.delta,
+ )
return 1.0 - best_error
def run_default(self, finder: Finder, **_kwargs: dict[str, type]) -> float:
@@ -411,8 +532,7 @@ def run_default(self, finder: Finder, **_kwargs: dict[str, type]) -> float:
def run_performance(self, finder: Finder, **kwargs: dict[str, type]) -> float:
"""
- Run a match case and return error from the match as dissimilarity
- and linear performance penalty.
+ Run a match case and return error from the match as dissimilarity and linear performance penalty.
:param finder: finder with match configuration to use for the run
:returns: error obtained as unity minus similarity
@@ -442,8 +562,9 @@ def run_performance(self, finder: Finder, **kwargs: dict[str, type]) -> float:
def run_peak(self, finder: Finder, **kwargs: dict[str, type]) -> float:
"""
- Run a match case and return error from the match as failure to obtain
- high similarity of one match and low similarity of all others.
+        Run a match case and return a peak error from the match.
+
+ A peak error is a failure to obtain high similarity of one match and low similarity of all others.
:param finder: finder with match configuration to use for the run
:returns: error obtained as unity minus similarity
@@ -502,7 +623,10 @@ def _handle_restricted_values(self, finder: Finder) -> None:
params["blockSize"].value += 1
if "tdetect" in finder.params:
params = finder.params["tdetect"]
- if params["backend"] == "east" and params["input_res_x"].value != params["input_res_y"].value:
+ if (
+ params["backend"] == "east"
+ and params["input_res_x"].value != params["input_res_y"].value
+ ):
params["input_res_x"].value = params["input_res_y"].value
if "ocr" in finder.params:
params = finder.params["ocr"]
diff --git a/guibot/config.py b/guibot/config.py
index fb85002a..ce6f786d 100644
--- a/guibot/config.py
+++ b/guibot/config.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Global and local (per target or region instance) configuration.
SUMMARY
------------------------------------------------------
-Global and local (per target or region instance) configuration.
INTERFACE
@@ -30,7 +30,8 @@
from .errors import *
-log = logging.getLogger('guibot.config')
+
+log = logging.getLogger("guibot.config")
class GlobalConfig(type):
@@ -79,112 +80,127 @@ class GlobalConfig(type):
_deep_learn_backend = "pytorch"
_hybrid_match_backend = "template"
- def toggle_delay(self, value: float = None) -> float | None:
+ def toggle_delay(cls, value: float = None) -> float | None:
"""
- Getter/setter for property attribute.
+ Get or set property attribute.
:param value: time interval between mouse down and up in a click
- :returns: current value if no argument was passed otherwise only sets it
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._toggle_delay
+ return cls._toggle_delay
else:
- GlobalConfig._toggle_delay = value
+ cls._toggle_delay = value
return None
+
#: time interval between mouse down and up in a click
toggle_delay = property(fget=toggle_delay, fset=toggle_delay)
- def click_delay(self, value: float = None) -> float | None:
+ def click_delay(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: time interval after a click (in a double or n-click)
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._click_delay
+ return cls._click_delay
else:
- GlobalConfig._click_delay = value
+ cls._click_delay = value
return None
+
#: time interval after a click (in a double or n-click)
click_delay = property(fget=click_delay, fset=click_delay)
- def delay_after_drag(self, value: float = None) -> float | None:
+ def delay_after_drag(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: timeout before drag operation
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._drag_delay
+ return cls._drag_delay
else:
- GlobalConfig._drag_delay = value
+ cls._drag_delay = value
return None
+
#: timeout before drag operation
delay_after_drag = property(fget=delay_after_drag, fset=delay_after_drag)
- def delay_before_drop(self, value: float = None) -> float | None:
+ def delay_before_drop(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: timeout before drop operation
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._drop_delay
+ return cls._drop_delay
else:
- GlobalConfig._drop_delay = value
+ cls._drop_delay = value
return None
+
#: timeout before drop operation
delay_before_drop = property(fget=delay_before_drop, fset=delay_before_drop)
- def delay_before_keys(self, value: float = None) -> float | None:
+ def delay_before_keys(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: timeout before key press operation
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._keys_delay
+ return cls._keys_delay
else:
- GlobalConfig._keys_delay = value
+ cls._keys_delay = value
return None
+
#: timeout before key press operation
delay_before_keys = property(fget=delay_before_keys, fset=delay_before_keys)
- def delay_between_keys(self, value: float = None) -> float | None:
+ def delay_between_keys(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: time interval between two consecutively typed keys
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._type_delay
+ return cls._type_delay
else:
- GlobalConfig._type_delay = value
+ cls._type_delay = value
return None
+
#: time interval between two consecutively typed keys
delay_between_keys = property(fget=delay_between_keys, fset=delay_between_keys)
- def rescan_speed_on_find(self, value: float = None) -> float | None:
+ def rescan_speed_on_find(cls, value: float = None) -> float | None:
"""
- Same as :py:func:`GlobalConfig.toggle_delay` but with
+ Get or set property attribute.
:param value: time interval between two image matching attempts
(used to reduce overhead on the CPU)
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._rescan_speed_on_find
+ return cls._rescan_speed_on_find
else:
- GlobalConfig._rescan_speed_on_find = value
+ cls._rescan_speed_on_find = value
return None
+
#: time interval between two image matching attempts (used to reduce overhead on the CPU)
- rescan_speed_on_find = property(fget=rescan_speed_on_find, fset=rescan_speed_on_find)
+ rescan_speed_on_find = property(
+ fget=rescan_speed_on_find, fset=rescan_speed_on_find
+ )
- def wait_for_animations(self, value: bool = None) -> bool | None:
+ def wait_for_animations(cls, value: bool = None) -> bool | None:
"""
Getter/setter for property attribute.
:param value: whether to wait for animations to complete and match only static (not moving) targets
- :returns: current value if no argument was passed otherwise only sets it
+ :returns: current value if no argument was passed otherwise None
:raises: :py:class:`ValueError` if value is not boolean or None
This is useful to handle highly animated environments with lots of moving
@@ -192,141 +208,162 @@ def wait_for_animations(self, value: bool = None) -> bool | None:
and the corresponding animation has finished.
"""
if value is None:
- return GlobalConfig._wait_for_animations
+ return cls._wait_for_animations
elif value is True or value is False:
- GlobalConfig._wait_for_animations = value
+ cls._wait_for_animations = value
return None
else:
raise ValueError
+
#: whether to wait for animations to complete and match only static (not moving) targets
wait_for_animations = property(fget=wait_for_animations, fset=wait_for_animations)
- def smooth_mouse_drag(self, value: bool = None) -> bool | None:
+ def smooth_mouse_drag(cls, value: bool = None) -> bool | None:
"""
Getter/setter for property attribute.
:param value: whether to move the mouse cursor to a location instantly or smoothly
- :returns: current value if no argument was passed otherwise only sets it
+ :returns: current value if no argument was passed otherwise None
:raises: :py:class:`ValueError` if value is not boolean or None
This is useful if a routine task has to be executed faster without
supervision or the need of debugging.
"""
if value is None:
- return GlobalConfig._smooth_mouse_drag
+ return cls._smooth_mouse_drag
elif value is True or value is False:
- GlobalConfig._smooth_mouse_drag = value
+ cls._smooth_mouse_drag = value
return None
else:
raise ValueError
+
#: whether to move the mouse cursor to a location instantly or smoothly
smooth_mouse_drag = property(fget=smooth_mouse_drag, fset=smooth_mouse_drag)
- def preprocess_special_chars(self, value: bool = None) -> bool | None:
+ def preprocess_special_chars(cls, value: bool = None) -> bool | None:
"""
- Same as :py:func:`GlobalConfig.smooth_mouse_drag` but with
+ Getter/setter for property attribute.
:param value: whether to preprocess capital and special characters and
handle them internally
+ :returns: current value if no argument was passed otherwise None
.. warning:: The characters will be forcefully preprocessed for the
autopy on linux (capital and special) and vncdotool (capital) backends.
"""
if value is None:
- return GlobalConfig._preprocess_special_chars
+ return cls._preprocess_special_chars
elif value is True or value is False:
- GlobalConfig._preprocess_special_chars = value
+ cls._preprocess_special_chars = value
return None
else:
raise ValueError
+
#: whether to preprocess capital and special characters and handle them internally
- preprocess_special_chars = property(fget=preprocess_special_chars, fset=preprocess_special_chars)
+ preprocess_special_chars = property(
+ fget=preprocess_special_chars, fset=preprocess_special_chars
+ )
- def save_needle_on_error(self, value: bool = None) -> bool | None:
+ def save_needle_on_error(cls, value: bool = None) -> bool | None:
"""
- Same as :py:func:`GlobalConfig.smooth_mouse_drag` but with
+ Getter/setter for property attribute.
:param value: whether to perform an extra needle dump on matching error
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._save_needle_on_error
+ return cls._save_needle_on_error
elif value is True or value is False:
- GlobalConfig._save_needle_on_error = value
+ cls._save_needle_on_error = value
return None
else:
raise ValueError
+
#: whether to perform an extra needle dump on matching error
- save_needle_on_error = property(fget=save_needle_on_error, fset=save_needle_on_error)
+ save_needle_on_error = property(
+ fget=save_needle_on_error, fset=save_needle_on_error
+ )
- def image_logging_level(self, value: int = None) -> int | None:
+ def image_logging_level(cls, value: int = None) -> int | None:
"""
Getter/setter for property attribute.
:param value: logging level similar to the python logging module
- :returns: current value if no argument was passed otherwise only sets it
+ :returns: current value if no argument was passed otherwise None
.. seealso:: See the image logging documentation for more details.
"""
if value is None:
- return GlobalConfig._image_logging_level
+ return cls._image_logging_level
else:
- GlobalConfig._image_logging_level = value
+ cls._image_logging_level = value
return None
+
#: logging level similar to the python logging module
image_logging_level = property(fget=image_logging_level, fset=image_logging_level)
- def image_logging_step_width(self, value: int = None) -> int | None:
+ def image_logging_step_width(cls, value: int = None) -> int | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_level` but with
+ Getter/setter for property attribute.
:param value: number of digits when enumerating the image
logging steps, e.g. value=3 for 001, 002, etc.
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._image_logging_step_width
+ return cls._image_logging_step_width
else:
- GlobalConfig._image_logging_step_width = value
+ cls._image_logging_step_width = value
return None
+
#: number of digits when enumerating the image logging steps, e.g. value=3 for 001, 002, etc.
- image_logging_step_width = property(fget=image_logging_step_width, fset=image_logging_step_width)
+ image_logging_step_width = property(
+ fget=image_logging_step_width, fset=image_logging_step_width
+ )
- def image_quality(self, value: int = None) -> int | None:
+ def image_quality(cls, value: int = None) -> int | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_level` but with
+ Getter/setter for property attribute.
:param value: quality of the image dumps ranging from 0 for no compression
to 9 for maximum compression (used to save space and reduce
the disk space needed for image logging)
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._image_quality
+ return cls._image_quality
else:
- GlobalConfig._image_quality = value
+ cls._image_quality = value
return None
+
#: quality of the image dumps ranging from 0 for no compression to 9 for maximum compression
# (used to save space and reduce the disk space needed for image logging)
image_quality = property(fget=image_quality, fset=image_quality)
- def image_logging_destination(self, value: str = None) -> str | None:
+ def image_logging_destination(cls, value: str = None) -> str | None:
"""
Getter/setter for property attribute.
:param value: relative path of the image logging steps
- :returns: current value if no argument was passed otherwise only sets it
+ :returns: current value if no argument was passed otherwise None
"""
if value is None:
- return GlobalConfig._image_logging_destination
+ return cls._image_logging_destination
else:
- GlobalConfig._image_logging_destination = value
+ cls._image_logging_destination = value
return None
+
#: relative path of the image logging steps
- image_logging_destination = property(fget=image_logging_destination, fset=image_logging_destination)
+ image_logging_destination = property(
+ fget=image_logging_destination, fset=image_logging_destination
+ )
- def display_control_backend(self, value: str = None) -> str | None:
+ def display_control_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the display control backend
+ :returns: current value if no argument was passed otherwise None
:raises: :py:class:`ValueError` if value is not among the supported backends
Supported backends:
@@ -347,22 +384,26 @@ def display_control_backend(self, value: str = None) -> str | None:
i.e. the backend has to be installed or you will have unsatisfied imports.
"""
if value is None:
- return GlobalConfig._display_control_backend
+ return cls._display_control_backend
else:
if value not in ["autopy", "xdotool", "vncdotool", "qemu", "pyautogui"]:
raise ValueError("Unsupported backend for GUI actions '%s'" % value)
- GlobalConfig._display_control_backend = value
+ cls._display_control_backend = value
return None
+
#: name of the display control backend
- display_control_backend = property(fget=display_control_backend, fset=display_control_backend)
+ display_control_backend = property(
+ fget=display_control_backend, fset=display_control_backend
+ )
# these methods do not check for valid values since this
# is already done during region and target initialization
- def find_backend(self, value: str = None) -> str | None:
+ def find_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the computer vision backend
+ :returns: current value if no argument was passed otherwise None
Supported backends:
* autopy - simple bitmap matching provided by AutoPy
@@ -385,164 +426,194 @@ def find_backend(self, value: str = None) -> str | None:
i.e. the backend has to be installed or you will have unsatisfied imports.
"""
if value is None:
- return GlobalConfig._find_backend
+ return cls._find_backend
else:
- GlobalConfig._find_backend = value
+ cls._find_backend = value
return None
+
#: name of the computer vision backend
find_backend = property(fget=find_backend, fset=find_backend)
- def contour_threshold_backend(self, value: str = None) -> str | None:
+ def contour_threshold_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the contour threshold backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: normal, adaptive, canny.
"""
if value is None:
- return GlobalConfig._contour_threshold_backend
+ return cls._contour_threshold_backend
else:
- GlobalConfig._contour_threshold_backend = value
+ cls._contour_threshold_backend = value
return None
+
#: name of the contour threshold backend
- contour_threshold_backend = property(fget=contour_threshold_backend, fset=contour_threshold_backend)
+ contour_threshold_backend = property(
+ fget=contour_threshold_backend, fset=contour_threshold_backend
+ )
- def template_match_backend(self, value: str = None) -> str | None:
+ def template_match_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the template matching backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: autopy, sqdiff, ccorr, ccoeff, sqdiff_normed,
ccorr_normed, ccoeff_normed.
"""
if value is None:
- return GlobalConfig._template_match_backend
+ return cls._template_match_backend
else:
- GlobalConfig._template_match_backend = value
+ cls._template_match_backend = value
return None
+
#: name of the template matching backend
- template_match_backend = property(fget=template_match_backend, fset=template_match_backend)
+ template_match_backend = property(
+ fget=template_match_backend, fset=template_match_backend
+ )
- def feature_detect_backend(self, value: str = None) -> str | None:
+ def feature_detect_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the feature detection backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: BruteForce, BruteForce-L1, BruteForce-Hamming,
BruteForce-Hamming(2), in-house-raw, in-house-region.
"""
if value is None:
- return GlobalConfig._feature_detect_backend
+ return cls._feature_detect_backend
else:
- GlobalConfig._feature_detect_backend = value
+ cls._feature_detect_backend = value
return None
+
#: name of the feature detection backend
- feature_detect_backend = property(fget=feature_detect_backend, fset=feature_detect_backend)
+ feature_detect_backend = property(
+ fget=feature_detect_backend, fset=feature_detect_backend
+ )
- def feature_extract_backend(self, value: str = None) -> str | None:
+ def feature_extract_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the feature extraction backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: ORB, FAST, STAR, GFTT, HARRIS, Dense, oldSURF.
"""
if value is None:
- return GlobalConfig._feature_extract_backend
+ return cls._feature_extract_backend
else:
- GlobalConfig._feature_extract_backend = value
+ cls._feature_extract_backend = value
return None
+
#: name of the feature extraction backend
- feature_extract_backend = property(fget=feature_extract_backend, fset=feature_extract_backend)
+ feature_extract_backend = property(
+ fget=feature_extract_backend, fset=feature_extract_backend
+ )
- def feature_match_backend(self, value: str = None) -> str | None:
+ def feature_match_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the feature matching backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: ORB, BRIEF, FREAK.
"""
if value is None:
- return GlobalConfig._feature_match_backend
+ return cls._feature_match_backend
else:
- GlobalConfig._feature_match_backend = value
+ cls._feature_match_backend = value
return None
+
#: name of the feature matching backend
- feature_match_backend = property(fget=feature_match_backend, fset=feature_match_backend)
+ feature_match_backend = property(
+ fget=feature_match_backend, fset=feature_match_backend
+ )
- def text_detect_backend(self, value: str = None) -> str | None:
+ def text_detect_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the text detection backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: east, erstat, contours, components.
"""
if value is None:
- return GlobalConfig._text_detect_backend
+ return cls._text_detect_backend
else:
- GlobalConfig._text_detect_backend = value
+ cls._text_detect_backend = value
return None
+
#: name of the text detection backend
text_detect_backend = property(fget=text_detect_backend, fset=text_detect_backend)
- def text_ocr_backend(self, value: str = None) -> str | None:
+ def text_ocr_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the optical character recognition backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: pytesseract, tesserocr, tesseract (OpenCV), hmm, beamSearch.
"""
if value is None:
- return GlobalConfig._text_ocr_backend
+ return cls._text_ocr_backend
else:
- GlobalConfig._text_ocr_backend = value
+ cls._text_ocr_backend = value
return None
+
#: name of the optical character recognition backend
text_ocr_backend = property(fget=text_ocr_backend, fset=text_ocr_backend)
- def deep_learn_backend(self, value: str = None) -> str | None:
+ def deep_learn_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the deep learning backend
+ :returns: current value if no argument was passed otherwise None
Supported backends: pytorch, tensorflow (partial).
"""
if value is None:
- return GlobalConfig._deep_learn_backend
+ return cls._deep_learn_backend
else:
- GlobalConfig._deep_learn_backend = value
+ cls._deep_learn_backend = value
return None
+
#: name of the deep learning backend
deep_learn_backend = property(fget=deep_learn_backend, fset=deep_learn_backend)
- def hybrid_match_backend(self, value: str = None) -> str | None:
+ def hybrid_match_backend(cls, value: str = None) -> str | None:
"""
- Same as :py:func:`GlobalConfig.image_logging_destination` but with
+ Getter/setter for property attribute.
:param value: name of the hybrid matching backend for unconfigured one-step targets
+ :returns: current value if no argument was passed otherwise None
Supported backends: all nonhybrid backends of :py:func:`GlobalConfig.find_backend`.
"""
if value is None:
- return GlobalConfig._hybrid_match_backend
+ return cls._hybrid_match_backend
else:
- GlobalConfig._hybrid_match_backend = value
+ cls._hybrid_match_backend = value
return None
+
#: name of the hybrid matching backend for unconfigured one-step targets
- hybrid_match_backend = property(fget=hybrid_match_backend, fset=hybrid_match_backend)
+ hybrid_match_backend = property(
+ fget=hybrid_match_backend, fset=hybrid_match_backend
+ )
class GlobalConfig(object, metaclass=GlobalConfig): # type: ignore
"""
- Handler for default configuration present in all
- cases where no specific value is set.
+ Handler for default configuration present in all cases where no specific value is set.
The methods of this class are shared among
all of its instances.
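+
+    A minimal usage sketch of the class-level properties (the chosen delay
+    value is only an illustrative assumption)::
+
+        GlobalConfig.toggle_delay = 0.05
+        assert GlobalConfig.toggle_delay == 0.05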
@@ -553,9 +624,10 @@ class GlobalConfig(object, metaclass=GlobalConfig): # type: ignore
class TemporaryConfig(object):
"""
- Proxies a GlobalConfig instance extending it to add context
- support, such that once this context ends the changes to the
- wrapped config object are restored.
+    Proxy a GlobalConfig instance, extending it with context manager support.
+
+    The context support means that once the context ends, any changes
+    made to the wrapped config object are reverted.
This is useful when we have a global config instance and need to
change it only for a few operations.
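+
+    A minimal usage sketch (the tweaked delay value is only an illustrative
+    assumption)::
+
+        with TemporaryConfig() as cfg:
+            cfg.delay_after_drag = 1.0
+            # ... perform a few drag operations with the slower timeout ...
+        # once the context exits, the original delay_after_drag is restored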
@@ -580,10 +652,12 @@ def __init__(self) -> None:
object.__setattr__(self, "_original_values", {})
def __getattribute__(self, name: Any) -> Any:
+ """Get attribute given a name."""
# fallback to GlobalConfig
return getattr(GlobalConfig, name)
def __setattr__(self, name: Any, value: Any) -> None:
+ """Set attribute given a name and a value."""
original_values = object.__getattribute__(self, "_original_values")
# store the original value only at the first set operation,
# so further changes won't overwrite the history
@@ -592,10 +666,12 @@ def __setattr__(self, name: Any, value: Any) -> None:
setattr(GlobalConfig, name, value)
def __enter__(self) -> "TemporaryConfig":
+ """Set up context manager upon entry."""
# our temporary config object
return self
def __exit__(self, *_: tuple[type, ...]) -> None:
+ """Clean up context manager upon exit."""
original_values = object.__getattribute__(self, "_original_values")
# restore original configuration values
for name, value in original_values.items():
@@ -606,10 +682,10 @@ def __exit__(self, *_: tuple[type, ...]) -> None:
class LocalConfig(object):
"""
- Container for the configuration of all display control and
- computer vision backends, responsible for making them behave
- according to the selected parameters as well as for providing
- information about them and the current parameters.
+    Contain the local configuration of all display control and computer vision backends.
+
+    The local container is responsible for making them behave according to the selected
+    parameters as well as for providing information about them and the current parameters.
"""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
@@ -639,24 +715,30 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
self.__synchronize_backend()
- def __configure_backend(self, backend: str = None, category: str ="type",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "type", reset: bool = False
+ ) -> None:
if category != "type":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
# reset makes no sense here since this is the base configuration
pass
if backend is None:
backend = "cv"
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
self.params[category] = {}
self.params[category]["backend"] = backend
- def configure_backend(self, backend: str = None, category: str = "type",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "type", reset: bool = False
+ ) -> None:
"""
Generate configuration dictionary for a given backend.
@@ -681,20 +763,26 @@ def configure(self, reset: bool = True, **kwargs: dict[str, type]) -> None:
"""
self.configure_backend(reset=reset)
- def __synchronize_backend(self, backend: str = None, category: str = "type",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "type", reset: bool = False
+ ) -> None:
if category != "type":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
# reset makes no sense here since this is the base configuration
pass
# no backend object to sync to
backend = "cv" if backend is None else backend
if backend not in self.algorithms[self.categories[category]]:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
- def synchronize_backend(self, backend: str = None, category: str = "type",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "type", reset: bool = False
+ ) -> None:
"""
Synchronize a category backend with the equalizer configuration.
@@ -707,7 +795,9 @@ def synchronize_backend(self, backend: str = None, category: str = "type",
"""
self.__synchronize_backend(backend, category, reset)
- def synchronize(self, *args: tuple[type, ...], reset: bool = True, **kwargs: dict[str, type]) -> None:
+ def synchronize(
+ self, *args: tuple[type, ...], reset: bool = True, **kwargs: dict[str, type]
+ ) -> None:
"""
Synchronize all backends with the current configuration dictionary.
diff --git a/guibot/controller.py b/guibot/controller.py
index e5a08bcc..061d3ed8 100644
--- a/guibot/controller.py
+++ b/guibot/controller.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Display controllers (DC backends) to perform user operations.
SUMMARY
------------------------------------------------------
-Display controllers (DC backends) to perform user operations.
INTERFACE
@@ -40,15 +40,22 @@
from .errors import *
-log = logging.getLogger('guibot.controller')
-__all__ = ['Controller', 'AutoPyController', 'XDoToolController',
- 'VNCDoToolController', 'PyAutoGUIController']
+log = logging.getLogger("guibot.controller")
+__all__ = [
+ "Controller",
+ "AutoPyController",
+ "XDoToolController",
+ "VNCDoToolController",
+ "PyAutoGUIController",
+]
class Controller(LocalConfig):
"""
- Screen control backend, responsible for performing desktop operations
- like mouse clicking, key pressing, text typing, etc.
+ Screen control backend, responsible for performing display operations.
+
+    Examples of display operations include mouse clicking,
+    key pressing, text typing, etc.
"""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
@@ -57,8 +64,12 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
# available and currently fully compatible methods
self.categories["control"] = "control_methods"
- self.algorithms["control_methods"] = ["autopy", "pyautogui",
- "xdotool", "vncdotool"]
+ self.algorithms["control_methods"] = [
+ "autopy",
+ "pyautogui",
+ "xdotool",
+ "vncdotool",
+ ]
# other attributes
self._backend_obj = None
@@ -83,6 +94,7 @@ def get_width(self) -> int:
:returns: width of the connected screen
"""
return self._width
+
width = property(fget=get_width)
def get_height(self) -> int:
@@ -92,6 +104,7 @@ def get_height(self) -> int:
:returns: height of the connected screen
"""
return self._height
+
height = property(fget=get_height)
def get_keymap(self) -> inputmap.Key:
@@ -101,6 +114,7 @@ def get_keymap(self) -> inputmap.Key:
:returns: map of keys to be used for the connected screen
"""
return self._keymap
+
keymap = property(fget=get_keymap)
def get_mousemap(self) -> inputmap.MouseButton:
@@ -110,6 +124,7 @@ def get_mousemap(self) -> inputmap.MouseButton:
:returns: map of mouse buttons to be used for the connected screen
"""
return self._mousemap
+
mousemap = property(fget=get_mousemap)
def get_modmap(self) -> inputmap.KeyModifier:
@@ -119,6 +134,7 @@ def get_modmap(self) -> inputmap.KeyModifier:
:returns: map of modifier keys to be used for the connected screen
"""
return self._modmap
+
modmap = property(fget=get_modmap)
def get_mouse_location(self) -> Location:
@@ -128,46 +144,63 @@ def get_mouse_location(self) -> Location:
:returns: location of the mouse pointer
"""
return self._pointer
+
mouse_location = property(fget=get_mouse_location)
- def __configure_backend(self, backend: str = None, category: str = "control",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "control", reset: bool = False
+ ) -> None:
if category != "control":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(Controller, self).configure_backend("dc", reset=True)
if backend is None:
backend = GlobalConfig.display_control_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
self.params[category]["backend"] = backend
log.log(9, "%s %s\n", category, self.params[category])
- def configure_backend(self, backend: str = None, category: str = "control",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "control", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "control",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "control", reset: bool = False
+ ) -> None:
if category != "control":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(Controller, self).synchronize_backend("dc", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
- def synchronize_backend(self, backend: str = None, category: str = "control",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "control", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -207,7 +240,7 @@ def _region_from_args(self, *args: "Region") -> tuple[int, int, int, int, str]:
height = self._height - ypos
# TODO: Switch to in-memory conversion - patch backends or request get_raw() from authors
- with NamedTemporaryFile(prefix='guibot', suffix='.png') as f:
+ with NamedTemporaryFile(prefix="guibot", suffix=".png") as f:
# NOTE: the file can be open twice on unix but only once on windows so simply
# use the generated filename to avoid this difference and remove it manually
filename = f.name
@@ -222,7 +255,9 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image:
:returns: image of the current screen
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def mouse_move(self, location: Location, smooth: bool = True) -> None:
"""
@@ -232,9 +267,13 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None:
:param smooth: whether to sue smooth transition or just teleport the mouse
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
- def mouse_click(self, button: int = None, count: int = 1, modifiers: list[str] = None) -> None:
+ def mouse_click(
+ self, button: int = None, count: int = 1, modifiers: list[str] = None
+ ) -> None:
"""
Click the selected mouse button N times at the current mouse location.
@@ -244,7 +283,9 @@ def mouse_click(self, button: int = None, count: int = 1, modifiers: list[str] =
(see :py:class:`inputmap.KeyModifier` for extensive list)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def mouse_down(self, button: int) -> None:
"""
@@ -254,7 +295,9 @@ def mouse_down(self, button: int) -> None:
(see :py:class:`inputmap.MouseButton` for extensive list)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def mouse_up(self, button: int) -> None:
"""
@@ -264,7 +307,9 @@ def mouse_up(self, button: int) -> None:
(see :py:class:`inputmap.MouseButton` for extensive list)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None:
"""
@@ -275,7 +320,9 @@ def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None:
(only available on some platforms)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
"""
@@ -286,7 +333,9 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
:param up_down: hold down if true else release
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
def keys_press(self, keys: list[str] | str) -> None:
"""
@@ -308,13 +357,16 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
(see :py:class:`inputmap.KeyModifier` for extensive list)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Method is not available for this controller implementation")
+ raise NotImplementedError(
+ "Method is not available for this controller implementation"
+ )
class AutoPyController(Controller):
"""
- Screen control backend implemented through AutoPy which is a small
- python library portable to Windows and Linux operating systems.
+ Screen control backend implemented through AutoPy.
+
+    AutoPy is a small Python library portable to Windows and Linux operating systems.
"""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
@@ -327,6 +379,8 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
def get_mouse_location(self) -> Location:
"""
+ Getter for readonly attribute.
+
Custom implementation of the base method.
See base method for details.
@@ -334,40 +388,57 @@ def get_mouse_location(self) -> Location:
loc = self._backend_obj.mouse.location()
# newer versions do their own scale conversion
version = self._backend_obj.__version__.split(".")
- if int(version[0]) > 3 or int(version[0]) == 3 and (int(version[1]) > 0 or int(version[2]) > 0):
+ if (
+ int(version[0]) > 3
+ or int(version[0]) == 3
+ and (int(version[1]) > 0 or int(version[2]) > 0)
+ ):
return Location(int(loc[0] * self._scale), int(loc[1] * self._scale))
return Location(int(loc[0] / self._scale), int(loc[1] / self._scale))
+
mouse_location = property(fget=get_mouse_location)
- def __configure_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
if category != "autopy":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(AutoPyController, self).configure_backend("autopy", reset=True)
self.params[category] = {}
self.params[category]["backend"] = "none"
- def configure_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
if category != "autopy":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(AutoPyController, self).synchronize_backend("autopy", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
import autopy
+
self._backend_obj = autopy
self._scale = self._backend_obj.screen.scale()
@@ -379,9 +450,12 @@ def __synchronize_backend(self, backend: str = None, category: str = "autopy",
self._modmap = inputmap.AutoPyKeyModifier()
self._mousemap = inputmap.AutoPyMouseButton()
- def synchronize_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -390,6 +464,8 @@ def synchronize_backend(self, backend: str = None, category: str = "autopy",
def capture_screen(self, *args: "list[int] | Region | None") -> Image:
"""
+ Get the current screen as image.
+
Custom implementation of the base method.
See base method for details.
@@ -397,22 +473,35 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image:
xpos, ypos, width, height, filename = self._region_from_args(*args)
# autopy works in points and requires a minimum of one point along a dimension
- xpos, ypos, width, height = xpos / self._scale, ypos / self._scale, width / self._scale, height / self._scale
- xpos, ypos = float(xpos) - (1.0 - float(width)) if width < 1.0 else xpos, float(ypos) - (1.0 - float(height)) if height < 1.0 else ypos
- height, width = 1.0 if float(height) < 1.0 else height, 1.0 if float(width) < 1.0 else width
+ xpos, ypos, width, height = (
+ xpos / self._scale,
+ ypos / self._scale,
+ width / self._scale,
+ height / self._scale,
+ )
+ xpos, ypos = float(xpos) - (1.0 - float(width)) if width < 1.0 else xpos, (
+ float(ypos) - (1.0 - float(height)) if height < 1.0 else ypos
+ )
+ height, width = 1.0 if float(height) < 1.0 else height, (
+ 1.0 if float(width) < 1.0 else width
+ )
try:
- autopy_bmp = self._backend_obj.bitmap.capture_screen(((xpos, ypos), (width, height)))
+ autopy_bmp = self._backend_obj.bitmap.capture_screen(
+ ((xpos, ypos), (width, height))
+ )
except ValueError:
- return Image("", PIL.Image.new('RGB', (1, 1)))
+ return Image("", PIL.Image.new("RGB", (1, 1)))
autopy_bmp.save(filename)
with PIL.Image.open(filename) as f:
- pil_image = f.convert('RGB')
+ pil_image = f.convert("RGB")
os.unlink(filename)
return Image("", pil_image)
def mouse_move(self, location: Location, smooth: bool = True) -> None:
"""
+ Move the mouse to a desired location.
+
Custom implementation of the base method.
See base method for details.
@@ -424,9 +513,12 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None:
self._backend_obj.mouse.move(x, y)
self._pointer = location
- def mouse_click(self, button: int = None, count: int = 1,
- modifiers: list[str] = None) -> None:
+ def mouse_click(
+ self, button: int = None, count: int = 1, modifiers: list[str] = None
+ ) -> None:
"""
+ Click the selected mouse button N times at the current mouse location.
+
Custom implementation of the base method.
See base method for details.
@@ -447,6 +539,8 @@ def mouse_click(self, button: int = None, count: int = 1,
def mouse_down(self, button: int) -> None:
"""
+ Hold down a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -455,6 +549,8 @@ def mouse_down(self, button: int) -> None:
def mouse_up(self, button: int) -> None:
"""
+ Release a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -463,6 +559,8 @@ def mouse_up(self, button: int) -> None:
def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
"""
+ Hold down or release together all provided keys.
+
Custom implementation of the base method.
See base method for details.
@@ -472,6 +570,8 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
"""
+ Type (press consecutively) all provided keys.
+
Custom implementation of the base method.
See base method for details.
@@ -491,10 +591,7 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
class XDoToolController(Controller):
- """
- Screen control backend implemented through the xdotool client and
- thus portable to Linux operating systems.
- """
+ """Screen control backend implemented through the xdotool client and thus portable to Linux operating systems."""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
"""Build a DC backend using XDoTool."""
@@ -506,6 +603,8 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
def get_mouse_location(self) -> Location:
"""
+ Getter for readonly attribute.
+
Custom implementation of the base method.
See base method for details.
@@ -514,12 +613,16 @@ def get_mouse_location(self) -> Location:
x = re.search(r"x:(\d+)", pos).group(1)
y = re.search(r"y:(\d+)", pos).group(1)
return Location(int(x), int(y))
+
mouse_location = property(fget=get_mouse_location)
- def __configure_backend(self, backend: str = None, category: str = "xdotool",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "xdotool", reset: bool = False
+ ) -> None:
if category != "xdotool":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(XDoToolController, self).configure_backend("xdotool", reset=True)
@@ -527,33 +630,44 @@ def __configure_backend(self, backend: str = None, category: str = "xdotool",
self.params[category]["backend"] = "none"
self.params[category]["binary"] = "xdotool"
- def configure_backend(self, backend: str = None, category: str = "xdotool",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "xdotool", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "xdotool",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "xdotool", reset: bool = False
+ ) -> None:
if category != "xdotool":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(XDoToolController, self).synchronize_backend("xdotool", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
import subprocess
+
class XDoTool(object):
def __init__(self, dc: Controller) -> None:
self.dc = dc
+
def run(self, command: str, *args: list[str]) -> str:
process = [self.dc.params[category]["binary"]]
process += [command]
process += args
return subprocess.check_output(process, shell=False).decode()
+
self._backend_obj = XDoTool(self)
self._width, self._height = self._backend_obj.run("getdisplaygeometry").split()
@@ -563,9 +677,12 @@ def run(self, command: str, *args: list[str]) -> str:
self._modmap = inputmap.XDoToolKeyModifier()
self._mousemap = inputmap.XDoToolMouseButton()
- def synchronize_backend(self, backend: str = None, category: str = "xdotool",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "xdotool", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -574,38 +691,59 @@ def synchronize_backend(self, backend: str = None, category: str = "xdotool",
def capture_screen(self, *args: "list[int] | Region | None") -> Image:
"""
+ Get the current screen as image.
+
Custom implementation of the base method.
See base method for details.
"""
xpos, ypos, width, height, filename = self._region_from_args(*args)
import subprocess
- with subprocess.Popen(("xwd", "-silent", "-root"), stdout=subprocess.PIPE) as xwd:
- subprocess.call(("convert", "xwd:-", "-crop", "%sx%s+%s+%s" % (width, height, xpos, ypos), filename), stdin=xwd.stdout)
+
+ with subprocess.Popen(
+ ("xwd", "-silent", "-root"), stdout=subprocess.PIPE
+ ) as xwd:
+ subprocess.call(
+ (
+ "convert",
+ "xwd:-",
+ "-crop",
+ "%sx%s+%s+%s" % (width, height, xpos, ypos),
+ filename,
+ ),
+ stdin=xwd.stdout,
+ )
with PIL.Image.open(filename) as f:
- pil_image = f.convert('RGB')
+ pil_image = f.convert("RGB")
os.unlink(filename)
return Image("", pil_image)
def mouse_move(self, location: Location, smooth: bool = True) -> None:
"""
+ Move the mouse to a desired location.
+
Custom implementation of the base method.
See base method for details.
"""
if smooth:
# TODO: implement smooth mouse move?
- log.warning("Smooth mouse move is not supported for the XDO controller,"
- " defaulting to instant mouse move")
+ log.warning(
+ "Smooth mouse move is not supported for the XDO controller,"
+ " defaulting to instant mouse move"
+ )
self._backend_obj.run("mousemove", str(location.x), str(location.y))
# handle race conditions where the backend coordinates are updated too
# slowly by giving some time for the new location to take effect there
time.sleep(0.3)
self._pointer = location
- def mouse_click(self, button: int = None, count: int = 1,
- modifiers: list[str] = None) -> None:
+ def mouse_click(
+ self, button: int = None, count: int = 1, modifiers: list[str] = None
+ ) -> None:
"""
+ Click the selected mouse button N times at the current mouse location.
+
Custom implementation of the base method.
See base method for details.
@@ -627,6 +765,8 @@ def mouse_click(self, button: int = None, count: int = 1,
def mouse_down(self, button: int) -> None:
"""
+ Hold down a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -635,6 +775,8 @@ def mouse_down(self, button: int) -> None:
def mouse_up(self, button: int) -> None:
"""
+ Release a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -643,18 +785,22 @@ def mouse_up(self, button: int) -> None:
def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
"""
+ Hold down or release together all provided keys.
+
Custom implementation of the base method.
See base method for details.
"""
for key in keys:
if up_down:
- self._backend_obj.run('keydown', str(key))
+ self._backend_obj.run("keydown", str(key))
else:
- self._backend_obj.run('keyup', str(key))
+ self._backend_obj.run("keyup", str(key))
def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
"""
+ Type (press consecutively) all provided keys.
+
Custom implementation of the base method.
See base method for details.
@@ -663,7 +809,7 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
self.keys_toggle(modifiers, True)
for part in text:
- self._backend_obj.run('type', str(part))
+ self._backend_obj.run("type", str(part))
if modifiers is not None:
self.keys_toggle(modifiers, False)
@@ -671,8 +817,9 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
class VNCDoToolController(Controller):
"""
- Screen control backend implemented through the VNCDoTool client and
- thus portable to any guest OS that is accessible through a VNC/RFB protocol.
+ Screen control backend implemented through the VNCDoTool client.
+
+ This backend is thus portable to any guest OS that is accessible through a VNC/RFB protocol.
"""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
@@ -683,9 +830,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
self.__synchronize_backend(reset=False)
- def __configure_backend(self, backend: str = None, category: str = "vncdotool", reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "vncdotool", reset: bool = False
+ ) -> None:
if category != "vncdotool":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(VNCDoToolController, self).configure_backend("vncdotool", reset=True)
@@ -698,42 +849,58 @@ def __configure_backend(self, backend: str = None, category: str = "vncdotool",
# password for the vnc server
self.params[category]["vnc_password"] = None
- def configure_backend(self, backend: str = None, category: str = "vncdotool",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "vncdotool", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "vncdotool",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "vncdotool", reset: bool = False
+ ) -> None:
if category != "vncdotool":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
- super(VNCDoToolController, self).synchronize_backend("vncdotool", reset=True)
+ super(VNCDoToolController, self).synchronize_backend(
+ "vncdotool", reset=True
+ )
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
from vncdotool import api
+
if self._backend_obj:
# api.connect() gives us a threaded client, so we need to clean up resources
# to avoid dangling connections and deadlocks if synchronizing more than once
self._backend_obj.disconnect()
- self._backend_obj = api.connect('%s:%i' % (self.params[category]["vnc_hostname"],
- self.params[category]["vnc_port"]),
- self.params[category]["vnc_password"])
+ self._backend_obj = api.connect(
+ "%s:%i"
+ % (
+ self.params[category]["vnc_hostname"],
+ self.params[category]["vnc_port"],
+ ),
+ self.params[category]["vnc_password"],
+ )
# for special characters preprocessing for the vncdotool
self._backend_obj.factory.force_caps = True
# additional logging for vncdotool available so let's make use of it
- logging.getLogger('vncdotool.client').setLevel(10)
- logging.getLogger('vncdotool').setLevel(logging.ERROR)
- logging.getLogger('twisted').setLevel(logging.ERROR)
+ logging.getLogger("vncdotool.client").setLevel(10)
+ logging.getLogger("vncdotool").setLevel(logging.ERROR)
+ logging.getLogger("twisted").setLevel(logging.ERROR)
# screen size
- with NamedTemporaryFile(prefix='guibot', suffix='.png') as f:
+ with NamedTemporaryFile(prefix="guibot", suffix=".png") as f:
filename = f.name
screen = self._backend_obj.captureScreen(filename)
os.unlink(filename)
@@ -749,9 +916,12 @@ def __synchronize_backend(self, backend: str = None, category: str = "vncdotool"
self._modmap = inputmap.VNCDoToolKeyModifier()
self._mousemap = inputmap.VNCDoToolMouseButton()
- def synchronize_backend(self, backend: str = None, category: str = "vncdotool",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "vncdotool", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -760,18 +930,24 @@ def synchronize_backend(self, backend: str = None, category: str = "vncdotool",
def capture_screen(self, *args: "list[int] | Region | None") -> Image:
"""
+ Get the current screen as image.
+
Custom implementation of the base method.
See base method for details.
"""
xpos, ypos, width, height, _ = self._region_from_args(*args)
self._backend_obj.refreshScreen()
- cropped = self._backend_obj.screen.crop((xpos, ypos, xpos + width, ypos + height))
- pil_image = cropped.convert('RGB')
+ cropped = self._backend_obj.screen.crop(
+ (xpos, ypos, xpos + width, ypos + height)
+ )
+ pil_image = cropped.convert("RGB")
return Image("", pil_image)
def mouse_move(self, location: Location, smooth: bool = True) -> None:
"""
+ Move the mouse to a desired location.
+
Custom implementation of the base method.
See base method for details.
@@ -782,9 +958,12 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None:
self._backend_obj.mouseMove(location.x, location.y)
self._pointer = location
- def mouse_click(self, button: int = None, count: int = 1,
- modifiers: list[str] = None) -> None:
+ def mouse_click(
+ self, button: int = None, count: int = 1, modifiers: list[str] = None
+ ) -> None:
"""
+ Click the selected mouse button N times at the current mouse location.
+
Custom implementation of the base method.
See base method for details.
@@ -807,6 +986,8 @@ def mouse_click(self, button: int = None, count: int = 1,
def mouse_down(self, button: int) -> None:
"""
+ Hold down a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -815,6 +996,8 @@ def mouse_down(self, button: int) -> None:
def mouse_up(self, button: int) -> None:
"""
+ Release a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -823,24 +1006,28 @@ def mouse_up(self, button: int) -> None:
def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
"""
+ Hold down or release together all provided keys.
+
Custom implementation of the base method.
See base method for details.
"""
for key in keys:
if key == "\\":
- key = 'bslash'
+ key = "bslash"
elif key == "/":
- key = 'fslash'
+ key = "fslash"
elif key == " ":
- key = 'space'
+ key = "space"
if up_down:
self._backend_obj.keyDown(key)
else:
self._backend_obj.keyUp(key)
- def keys_type(self, text: list[str] | str, modifiers: list [str] = None) -> None:
+ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
"""
+ Type (press consecutively) all provided keys.
+
Custom implementation of the base method.
See base method for details.
@@ -851,13 +1038,13 @@ def keys_type(self, text: list[str] | str, modifiers: list [str] = None) -> None
for part in text:
for char in str(part):
if char == "\\":
- char = 'bslash'
+ char = "bslash"
elif char == "/":
- char = 'fslash'
+ char = "fslash"
elif char == " ":
- char = 'space'
+ char = "space"
elif char == "\n":
- char = 'return'
+ char = "return"
time.sleep(GlobalConfig.delay_between_keys)
self._backend_obj.keyPress(char)
@@ -867,8 +1054,9 @@ def keys_type(self, text: list[str] | str, modifiers: list [str] = None) -> None
class PyAutoGUIController(Controller):
"""
- Screen control backend implemented through PyAutoGUI which is a python
- library portable to MacOS, Windows, and Linux operating systems.
+ Screen control backend implemented through PyAutoGUI.
+
+ PyAutoGUI is a python library portable to MacOS, Windows, and Linux operating systems.
"""
def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
@@ -881,43 +1069,60 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
def get_mouse_location(self) -> Location:
"""
+ Getter for readonly attribute.
+
Custom implementation of the base method.
See base method for details.
"""
x, y = self._backend_obj.position()
return Location(x, y)
+
mouse_location = property(fget=get_mouse_location)
- def __configure_backend(self, backend: str = None, category: str = "pyautogui",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "pyautogui", reset: bool = False
+ ) -> None:
if category != "pyautogui":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(PyAutoGUIController, self).configure_backend("pyautogui", reset=True)
self.params[category] = {}
self.params[category]["backend"] = "none"
- def configure_backend(self, backend: str = None, category: str = "pyautogui",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "pyautogui", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "pyautogui",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "pyautogui", reset: bool = False
+ ) -> None:
if category != "pyautogui":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
- super(PyAutoGUIController, self).synchronize_backend("pyautogui", reset=True)
+ super(PyAutoGUIController, self).synchronize_backend(
+ "pyautogui", reset=True
+ )
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
import pyautogui
+
# allow for (0,0) and edge coordinates
pyautogui.FAILSAFE = False
self._backend_obj = pyautogui
@@ -928,9 +1133,12 @@ def __synchronize_backend(self, backend: str = None, category: str = "pyautogui"
self._modmap = inputmap.PyAutoGUIKeyModifier()
self._mousemap = inputmap.PyAutoGUIMouseButton()
- def synchronize_backend(self, backend: str = None, category: str = "pyautogui",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "pyautogui", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -939,6 +1147,8 @@ def synchronize_backend(self, backend: str = None, category: str = "pyautogui",
def capture_screen(self, *args: "list[int] | Region | None") -> Image:
"""
+ Get the current screen as image.
+
Custom implementation of the base method.
See base method for details.
@@ -950,6 +1160,8 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image:
def mouse_move(self, location: Location, smooth: bool = True) -> None:
"""
+ Move the mouse to a desired location.
+
Custom implementation of the base method.
See base method for details.
@@ -960,9 +1172,12 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None:
self._backend_obj.moveTo(location.x, location.y)
self._pointer = location
- def mouse_click(self, button: int = None, count: int = 1,
- modifiers: list[str] = None) -> None:
+ def mouse_click(
+ self, button: int = None, count: int = 1, modifiers: list[str] = None
+ ) -> None:
"""
+ Click the selected mouse button N times at the current mouse location.
+
Custom implementation of the base method.
See base method for details.
@@ -985,6 +1200,8 @@ def mouse_click(self, button: int = None, count: int = 1,
def mouse_down(self, button: int) -> None:
"""
+ Hold down a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -993,6 +1210,8 @@ def mouse_down(self, button: int) -> None:
def mouse_up(self, button: int) -> None:
"""
+ Release a mouse button.
+
Custom implementation of the base method.
See base method for details.
@@ -1001,6 +1220,8 @@ def mouse_up(self, button: int) -> None:
def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None:
"""
+ Scroll the mouse for a number of clicks.
+
Custom implementation of the base method.
See base method for details.
@@ -1010,8 +1231,10 @@ def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None:
else:
self._backend_obj.scroll(clicks)
- def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
+ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
"""
+ Hold down or release together all provided keys.
+
Custom implementation of the base method.
See base method for details.
@@ -1024,6 +1247,8 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None:
def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None:
"""
+ Type (press consecutively) all provided keys.
+
Custom implementation of the base method.
See base method for details.
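
As a minimal usage sketch of the controller API reformatted above (only the class, method, and parameter names come from the hunks; the values, the running display, and an installed pyautogui are assumptions):

from guibot.controller import PyAutoGUIController
from guibot.location import Location

dc = PyAutoGUIController()           # default args configure and synchronize the "pyautogui" backend
print(dc.mouse_location)             # read-only property backed by get_mouse_location()
screen = dc.capture_screen()         # whole screen when no region arguments are given
dc.mouse_move(Location(100, 200), smooth=False)
dc.mouse_click(count=2)              # default button, two consecutive clicks
dc.keys_type("hello world")          # types the provided characters consecutively
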
diff --git a/guibot/desktopcontrol.py b/guibot/desktopcontrol.py
index 70c85288..1875e426 100644
--- a/guibot/desktopcontrol.py
+++ b/guibot/desktopcontrol.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Old module for display controllers (DC backends) - to be deprecated.
SUMMARY
------------------------------------------------------
-Old module for display controllers (DC backends) - to be deprecated.
INTERFACE
@@ -30,8 +30,9 @@
from .controller import *
-logging.getLogger("guibot.desktopcontrol")\
- .warn("The `desktopcontrol` module is deprecated, use `controller` instead.")
+logging.getLogger("guibot.desktopcontrol").warn(
+ "The `desktopcontrol` module is deprecated, use `controller` instead."
+)
DesktopControl = Controller
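
A quick illustration of what this shim means for existing callers (only the alias and the module names are taken from the patch, the rest is illustrative):

# The old import path keeps working and simply logs the deprecation warning above.
from guibot.desktopcontrol import DesktopControl
from guibot.controller import Controller

assert DesktopControl is Controller   # plain alias, so existing isinstance checks keep passing
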
diff --git a/guibot/errors.py b/guibot/errors.py
index bf7adb0c..fc6130f1 100644
--- a/guibot/errors.py
+++ b/guibot/errors.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Exceptions used by all guibot interfaces and modules.
SUMMARY
------------------------------------------------------
-Exceptions used by all guibot interfaces and modules.
INTERFACE
@@ -26,31 +26,37 @@
"""
-__all__ = ['GuiBotError', 'FileNotFoundError',
- 'IncompatibleTargetError', 'IncompatibleTargetFileError',
- 'FindError', 'NotFindError',
- 'UnsupportedBackendError', 'MissingHotmapError',
- 'UninitializedBackendError']
+__all__ = [
+ "GuiBotError",
+ "FileNotFoundError",
+ "IncompatibleTargetError",
+ "IncompatibleTargetFileError",
+ "FindError",
+ "NotFindError",
+ "UnsupportedBackendError",
+ "MissingHotmapError",
+ "UninitializedBackendError",
+]
class GuiBotError(Exception):
- """GuiBot exception base class"""
+ """GuiBot exception base class."""
class FileNotFoundError(GuiBotError):
- """Exception raised when a picture file cannot be found on disc"""
+ """Exception raised when a picture file cannot be found on disc."""
class IncompatibleTargetError(GuiBotError):
- """Exception raised when a matched target is of type that cannot be handled by the finder"""
+ """Exception raised when a matched target is of type that cannot be handled by the finder."""
class IncompatibleTargetFileError(GuiBotError):
- """Exception raised when a matched target is restored from a file of unsupported type"""
+ """Exception raised when a matched target is restored from a file of unsupported type."""
class FindError(GuiBotError):
- """Exception raised when an Image cannot be found on the screen"""
+ """Exception raised when an Image cannot be found on the screen."""
def __init__(self, failed_target: "Target" = None) -> None:
"""
@@ -66,7 +72,7 @@ def __init__(self, failed_target: "Target" = None) -> None:
class NotFindError(GuiBotError):
- """Exception raised when an Image can be found on the screen but should not be"""
+ """Exception raised when an Image can be found on the screen but should not be."""
def __init__(self, failed_target: "Target" = None) -> None:
"""
@@ -75,19 +81,22 @@ def __init__(self, failed_target: "Target" = None) -> None:
:param failed_target: the target that was found
"""
if failed_target:
- message = "The target %s was found on the screen while it was not expected" % failed_target
+ message = (
+ "The target %s was found on the screen while it was not expected"
+ % failed_target
+ )
else:
message = "The target was found on the screen while it was not expected"
super(NotFindError, self).__init__(message)
class UnsupportedBackendError(GuiBotError):
- """Exception raised when a non-existent method is used for finding a target"""
+ """Exception raised when a non-existent method is used for finding a target."""
class MissingHotmapError(GuiBotError):
- """Exception raised when an attempt to access a non-existent hotmap in the image logger is made"""
+ """Exception raised when an attempt to access a non-existent hotmap in the image logger is made."""
class UninitializedBackendError(GuiBotError):
- """Exception raised when a region is created within an empty screen (a disconnected display control backend)"""
+ """Exception raised when a region is created within an empty screen (a disconnected display control backend)."""
diff --git a/guibot/fileresolver.py b/guibot/fileresolver.py
index d54d2316..29ec0e62 100644
--- a/guibot/fileresolver.py
+++ b/guibot/fileresolver.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Cached and reused paths for target files to search in and load target data from.
SUMMARY
------------------------------------------------------
-Cached and reused paths for target files to search in and load target data from.
INTERFACE
@@ -28,17 +28,15 @@
import os
from .errors import *
from typing import Generator
-
import logging
-log = logging.getLogger('guibot.path')
+log = logging.getLogger("guibot.path")
class FileResolver(object):
"""
- Handler for currently used target paths or
- sources of targets with a desired name.
+ Handler for currently used target paths or sources of targets with a desired name.
The methods of this class are shared among
all of its instances.
@@ -49,8 +47,7 @@ class FileResolver(object):
def add_path(self, directory: str) -> None:
"""
- Add a path to the list of currently accessible paths
- if it wasn't already added.
+ Add a path to the list of currently accessible paths if it wasn't already added.
:param directory: path to add
"""
@@ -78,7 +75,9 @@ def clear(self) -> None:
# empty list but keep reference
del FileResolver._target_paths[:]
- def search(self, filename: str, restriction: str = "", silent: bool = False) -> str | None:
+ def search(
+ self, filename: str, restriction: str = "", silent: bool = False
+ ) -> str | None:
"""
Search for a filename in the currently accessible paths.
@@ -97,40 +96,42 @@ def search(self, filename: str, restriction: str = "", silent: bool = False) ->
return fullname
# Check with .png extension for images
- fullname = os.path.join(directory, filename + '.png')
+ fullname = os.path.join(directory, filename + ".png")
if os.path.exists(fullname):
return fullname
# Check with .xml extension for cascade
- fullname = os.path.join(directory, filename + '.xml')
+ fullname = os.path.join(directory, filename + ".xml")
if os.path.exists(fullname):
return fullname
# Check with .txt extension for text
- fullname = os.path.join(directory, filename + '.txt')
+ fullname = os.path.join(directory, filename + ".txt")
if os.path.exists(fullname):
return fullname
# Check with .csv extension for patterns
- fullname = os.path.join(directory, filename + '.csv')
+ fullname = os.path.join(directory, filename + ".csv")
if os.path.exists(fullname):
return fullname
# Check with .steps extension for chains
- fullname = os.path.join(directory, filename + '.steps')
+ fullname = os.path.join(directory, filename + ".steps")
if os.path.exists(fullname):
return fullname
if not silent:
- raise FileNotFoundError('File ' + filename + ' not found')
+ raise FileNotFoundError("File " + filename + " not found")
return None
def __iter__(self) -> Generator[str, None, None]:
+ """Iterate over the target paths."""
for p in self._target_paths:
yield p
def __len__(self) -> int:
+ """Return total number of target paths."""
return len(self._target_paths)
@@ -147,8 +148,7 @@ class CustomFileResolver(object):
def __init__(self, *paths: tuple[type, ...]) -> None:
"""
- Create the class with the paths that the search will be
- restricted to.
+ Create the class with the paths that the search will be restricted to.
:param paths: list of paths that the search will use
"""
@@ -170,7 +170,10 @@ def __enter__(self) -> FileResolver:
file_resolver.add_path(p)
return file_resolver
- def __exit__(self, *args: tuple[type, ...],) -> None:
+ def __exit__(
+ self,
+ *args: tuple[type, ...],
+ ) -> None:
"""
Exit this context and restore the original paths.
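
A rough usage sketch of the path API reformatted above; the method names, the tried extensions, and the context-manager behavior come from the hunks, while the directory and file names are made up for illustration:

from guibot.fileresolver import FileResolver, CustomFileResolver

resolver = FileResolver()
resolver.add_path("images")              # target paths are shared by all instances
print(len(resolver), list(resolver))     # __len__ and __iter__ shown above
print(resolver.search("button"))         # tries "button" then the .png/.xml/.txt/.csv/.steps variants

with CustomFileResolver("other_images") as restricted:
    # only the paths given to the context manager are searched here
    restricted.search("button", silent=True)   # returns None instead of raising if missing
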
diff --git a/guibot/finder.py b/guibot/finder.py
index 26025e75..f3ff3d7d 100644
--- a/guibot/finder.py
+++ b/guibot/finder.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see <http://www.gnu.org/licenses/>.
"""
+Computer vision finders (CV backends) to perform find targets on screen.
SUMMARY
------------------------------------------------------
-Computer vision finders (CV backends) to perform find targets on screen.
INTERFACE
@@ -34,6 +34,7 @@
import PIL.Image
from typing import Callable
from typing import Any
+import logging
from .config import GlobalConfig, LocalConfig
from .imagelogger import ImageLogger
@@ -41,23 +42,38 @@
from .errors import *
from .location import Location
-import logging
-log = logging.getLogger('guibot.finder')
+
+log = logging.getLogger("guibot.finder")
-__all__ = ['CVParameter', 'Finder', 'AutoPyFinder', 'ContourFinder', 'TemplateFinder',
- 'FeatureFinder', 'CascadeFinder', 'TextFinder', 'TemplateFeatureFinder',
- 'DeepFinder', 'HybridFinder']
+__all__ = [
+ "CVParameter",
+ "Finder",
+ "AutoPyFinder",
+ "ContourFinder",
+ "TemplateFinder",
+ "FeatureFinder",
+ "CascadeFinder",
+ "TextFinder",
+ "TemplateFeatureFinder",
+ "DeepFinder",
+ "HybridFinder",
+]
class CVParameter(object):
"""A class for a single parameter used for CV backend configuration."""
- def __init__(self, value: bool | int | float | str | None,
- min_val: type["value"] = None,
- max_val: type["value"] = None,
- delta: float = 10.0, tolerance: float = 1.0,
- fixed: bool = True, enumerated: bool = False) -> None:
+ def __init__(
+ self,
+ value: bool | int | float | str | None,
+ min_val: type["value"] = None,
+ max_val: type["value"] = None,
+ delta: float = 10.0,
+ tolerance: float = 1.0,
+ fixed: bool = True,
+ enumerated: bool = False,
+ ) -> None:
"""
Build a computer vision parameter.
@@ -105,7 +121,9 @@ def __init__(self, value: bool | int | float | str | None,
# enumerable (e.g. modes) or range value
self.enumerated = enumerated
if self.enumerated and (self.min_val is None or self.max_val is None):
- raise ValueError("Enumerated parameters must have a finite (usually small) range")
+ raise ValueError(
+ "Enumerated parameters must have a finite (usually small) range"
+ )
def __repr__(self) -> str:
"""
@@ -113,12 +131,22 @@ def __repr__(self) -> str:
:returns: special syntax representation of the parameter
"""
- return (""
- % (self.value, self.min_val, self.max_val, self.delta, self.tolerance, self.fixed, self.enumerated))
+ return (
+ ""
+ % (
+ self.value,
+ self.min_val,
+ self.max_val,
+ self.delta,
+ self.tolerance,
+ self.fixed,
+ self.enumerated,
+ )
+ )
def __eq__(self, other: "CVParameter") -> bool:
"""
- Custom implementation for equality check.
+ Check equality for CV parameters.
:returns: whether this instance is equal to another
"""
@@ -136,9 +164,11 @@ def from_string(raw: str) -> "CVParameter":
:raises: :py:class:`ValueError` if unsupported type is encountered
"""
args = []
- string_args = re.match(r"",
- raw).group(1, 2, 3, 4, 5, 6)
+ string_args = re.match(
+ r"",
+ raw,
+ ).group(1, 2, 3, 4, 5, 6)
for arg in string_args:
if arg == "None":
@@ -160,8 +190,11 @@ def from_string(raw: str) -> "CVParameter":
log.log(9, "%s", args)
return CVParameter(*args)
- def random_value(self, mu: bool | int | float | str = None,
- sigma: bool | int | float | str = None) -> bool | int | float | str | None:
+ def random_value(
+ self,
+ mu: bool | int | float | str = None,
+ sigma: bool | int | float | str = None,
+ ) -> bool | int | float | str | None:
"""
Return a random value of the CV parameter given its range and type.
@@ -177,21 +210,23 @@ def random_value(self, mu: bool | int | float | str = None,
if mu is None or self.enumerated:
return random.uniform(self.range[0], self.range[1])
elif sigma is None:
- return min(max(random.gauss(mu, (start-end)/4), start), end)
+ return min(max(random.gauss(mu, (start - end) / 4), start), end)
else:
return min(max(random.gauss(mu, sigma), start), end)
elif isinstance(self.value, int):
if mu is None or self.enumerated:
return random.randint(start, end)
elif sigma is None:
- return min(max(int(random.gauss(mu, (start-end)/4)), start), end)
+ return min(max(int(random.gauss(mu, (start - end) / 4)), start), end)
else:
return min(max(int(random.gauss(mu, sigma)), start), end)
elif isinstance(self.value, bool):
value = random.randint(0, 1)
return value == 1
else:
- log.warning("Cannot generate random value for CV parameters other than float, int, and bool")
+ log.warning(
+ "Cannot generate random value for CV parameters other than float, int, and bool"
+ )
return self.value
@@ -234,7 +269,7 @@ def from_match_file(filename: str) -> "Finder":
if not parser.has_section("find"):
raise IOError("No image matching configuration can be found")
try:
- backend_name = parser.get("find", 'backend')
+ backend_name = parser.get("find", "backend")
except config.NoOptionError:
backend_name = GlobalConfig.find_backend
@@ -261,9 +296,11 @@ def from_match_file(filename: str) -> "Finder":
for category in finder.params.keys():
if parser.has_section(category):
- section_backend = parser.get(category, 'backend')
+ section_backend = parser.get(category, "backend")
if section_backend != finder.params[category]["backend"]:
- finder.configure_backend(backend=section_backend, category=category, reset=False)
+ finder.configure_backend(
+ backend=section_backend, category=category, reset=False
+ )
for option in parser.options(category):
if option == "backend":
continue
@@ -294,14 +331,14 @@ def to_match_file(finder: "Finder", filename: str) -> None:
for section in sections:
if not parser.has_section(section):
parser.add_section(section)
- parser.set(section, 'backend', finder.params[section]["backend"])
+ parser.set(section, "backend", finder.params[section]["backend"])
for option in finder.params[section]:
log.log(9, "%s %s", section, option)
parser.set(section, option, finder.params[section][option])
if not filename.endswith(".match"):
filename += ".match"
- with open(filename, 'w') as configfile:
+ with open(filename, "w") as configfile:
configfile.write("# IMAGE MATCH DATA\n")
parser.write(configfile)
@@ -311,8 +348,17 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
# available and currently fully compatible methods
self.categories["find"] = "find_methods"
- self.algorithms["find_methods"] = ["autopy", "contour", "template", "feature",
- "cascade", "text", "tempfeat", "deep", "hybrid"]
+ self.algorithms["find_methods"] = [
+ "autopy",
+ "contour",
+ "template",
+ "feature",
+ "cascade",
+ "text",
+ "tempfeat",
+ "deep",
+ "hybrid",
+ ]
# other attributes
self.imglog = ImageLogger()
@@ -322,17 +368,22 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if configure:
self.__configure_backend(reset=True)
- def __configure_backend(self, backend: str = None, category: str = "find",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "find", reset: bool = False
+ ) -> None:
if category != "find":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(Finder, self).configure_backend(backend="cv", reset=True)
if backend is None:
backend = GlobalConfig.find_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
@@ -340,28 +391,39 @@ def __configure_backend(self, backend: str = None, category: str = "find",
self.params[category]["similarity"] = CVParameter(0.75, 0.0, 1.0)
log.log(9, "%s %s\n", category, self.params[category])
- def configure_backend(self, backend: str = None, category: str = "find",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "find", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "find",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "find", reset: bool = False
+ ) -> None:
if category != "find":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(Finder, self).synchronize_backend("cv", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
backend = self.params[category]["backend"]
- def synchronize_backend(self, backend: str = None, category: str = "find",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "find", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -370,8 +432,9 @@ def synchronize_backend(self, backend: str = None, category: str = "find",
def can_calibrate(self, category: str, mark: bool) -> None:
"""
- Fix the parameters for a given category backend algorithm,
- i.e. disallow the calibrator to change them.
+ Fix the parameters for a given category backend algorithm.
+
+ "Fix" as in disallow the calibrator from changing them.
:param category: backend category whose parameters are marked
:param mark: whether to mark for calibration
@@ -379,8 +442,10 @@ def can_calibrate(self, category: str, mark: bool) -> None:
supported backend categories
"""
if category not in self.categories.keys():
- raise UnsupportedBackendError("Category '%s' not among the "
- "supported %s" % (category, self.categories.keys()))
+ raise UnsupportedBackendError(
+ "Category '%s' not among the "
+ "supported %s" % (category, self.categories.keys())
+ )
for key, value in self.params[category].items():
if not isinstance(value, CVParameter):
@@ -394,7 +459,9 @@ def can_calibrate(self, category: str, mark: bool) -> None:
value.fixed = True
else:
value.fixed = not mark
- log.debug("Setting %s/%s to fixed=%s for calibration", category, key, value.fixed)
+ log.debug(
+ "Setting %s/%s to fixed=%s for calibration", category, key, value.fixed
+ )
def copy(self) -> "Finder":
"""
@@ -412,7 +479,9 @@ def copy(self) -> "Finder":
for category in self.params.keys():
for param in self.params[category].keys():
- acopy.params[category][param] = copy.deepcopy(self.params[category][param])
+ acopy.params[category][param] = copy.deepcopy(
+ self.params[category][param]
+ )
for category in self.params.keys():
try:
@@ -432,7 +501,9 @@ def find(self, needle: "Target | list[Target]", haystack: "Image") -> "list[Matc
:returns: all found matches (one in most use cases)
:raises: :py:class:`NotImplementedError` if the base class method is called
"""
- raise NotImplementedError("Abstract method call - call implementation of this class")
+ raise NotImplementedError(
+ "Abstract method call - call implementation of this class"
+ )
def log(self, lvl: int) -> None:
"""
@@ -449,9 +520,13 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
- similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ similarity = (
+ self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ )
name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity)
self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1])
@@ -473,19 +548,25 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if configure:
self.__configure_backend(reset=True)
- def __configure_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
if category != "autopy":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(AutoPyFinder, self).configure_backend(backend="autopy", reset=True)
self.params[category] = {}
self.params[category]["backend"] = "none"
- def configure_backend(self, backend: str = None, category: str = "autopy",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "autopy", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
@@ -494,6 +575,8 @@ def configure_backend(self, backend: str = None, category: str = "autopy",
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
:param needle: target image to search for
@@ -525,13 +608,15 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
self._bitmapcache[needle.filename] = autopy_needle
# TODO: Use in-memory conversion
- with NamedTemporaryFile(prefix='guibot', suffix='.png') as f:
+ with NamedTemporaryFile(prefix="guibot", suffix=".png") as f:
haystack.save(f.name)
autopy_screenshot = bitmap.Bitmap.open(f.name)
autopy_tolerance = 1.0 - self.params["find"]["similarity"].value
- log.debug("Performing autopy template matching with tolerance %s (color)",
- autopy_tolerance)
+ log.debug(
+ "Performing autopy template matching with tolerance %s (color)",
+ autopy_tolerance,
+ )
# TODO: since only the coordinates are available and fuzzy areas of
# matches are returned we need to ask autopy team for returning
@@ -548,10 +633,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
w, h = needle.width, needle.height
dx, dy = needle.center_offset.x, needle.center_offset.y
from .match import Match
+
matches = [Match(x, y, w, h, dx, dy, similarity)]
from PIL import ImageDraw
+
draw = ImageDraw.Draw(self.imglog.hotmaps[-1])
- draw.rectangle((x, y, x+w, y+h), outline=(0, 0, 255))
+ draw.rectangle((x, y, x + w, y + h), outline=(0, 0, 255))
del draw
else:
matches = []
@@ -582,15 +669,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if configure:
self.__configure(reset=True)
- def __configure_backend(self, backend: str = None, category: str = "contour",
- reset: bool = False) -> None:
- """
- Custom implementation of the base method.
-
- See base method for details.
- """
+ def __configure_backend(
+ self, backend: str = None, category: str = "contour", reset: bool = False
+ ) -> None:
if category not in ["contour", "threshold"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(ContourFinder, self).configure_backend("contour", reset=True)
if category == "contour" and backend is None:
@@ -598,8 +683,10 @@ def __configure_backend(self, backend: str = None, category: str = "contour",
elif category == "threshold" and backend is None:
backend = GlobalConfig.contour_threshold_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
@@ -607,12 +694,18 @@ def __configure_backend(self, backend: str = None, category: str = "contour",
if category == "contour":
# 1 RETR_EXTERNAL, 2 RETR_LIST, 3 RETR_CCOMP, 4 RETR_TREE
- self.params[category]["retrievalMode"] = CVParameter(2, 1, 4, enumerated=True)
+ self.params[category]["retrievalMode"] = CVParameter(
+ 2, 1, 4, enumerated=True
+ )
# 1 CHAIN_APPROX_NONE, 2 CHAIN_APPROX_SIMPLE, 3 CHAIN_APPROX_TC89_L1, 4 CHAIN_APPROX_TC89_KCOS
- self.params[category]["approxMethod"] = CVParameter(2, 1, 4, enumerated=True)
+ self.params[category]["approxMethod"] = CVParameter(
+ 2, 1, 4, enumerated=True
+ )
self.params[category]["minArea"] = CVParameter(0, 0, None, 100.0)
# 1 L1 method, 2 L2 method, 3 L3 method
- self.params[category]["contoursMatch"] = CVParameter(1, 1, 3, enumerated=True)
+ self.params[category]["contoursMatch"] = CVParameter(
+ 1, 1, 3, enumerated=True
+ )
elif category == "threshold":
# 1 normal, 2 median, 3 gaussian, 4 none
self.params[category]["blurType"] = CVParameter(4, 1, 4, enumerated=True)
@@ -623,36 +716,63 @@ def __configure_backend(self, backend: str = None, category: str = "contour",
self.params[category]["thresholdValue"] = CVParameter(122, 0, 255, 50.0)
self.params[category]["thresholdMax"] = CVParameter(255, 0, 255, 20.0)
# 0 binary, 1 binar_inv, 2 trunc, 3 tozero, 4 tozero_inv, 5 mask, 6 otsu, 7 triangle
- self.params[category]["thresholdType"] = CVParameter(1, 0, 7, enumerated=True)
+ self.params[category]["thresholdType"] = CVParameter(
+ 1, 0, 7, enumerated=True
+ )
elif backend == "adaptive":
self.params[category]["thresholdMax"] = CVParameter(255, 0, 255, 20.0)
# 0 adaptive mean threshold, 1 adaptive gaussian (weighted mean) threshold
- self.params[category]["adaptiveMethod"] = CVParameter(1, 0, 1, enumerated=True)
+ self.params[category]["adaptiveMethod"] = CVParameter(
+ 1, 0, 1, enumerated=True
+ )
# 0 normal, 1 inverted
- self.params[category]["thresholdType"] = CVParameter(1, 0, 1, enumerated=True)
+ self.params[category]["thresholdType"] = CVParameter(
+ 1, 0, 1, enumerated=True
+ )
# size of the neighborhood to consider to adaptive thresholding
- self.params[category]["blockSize"] = CVParameter(11, 3, None, 200.0, 2.0)
+ self.params[category]["blockSize"] = CVParameter(
+ 11, 3, None, 200.0, 2.0
+ )
# constant to subtract from the (weighted) calculated mean
self.params[category]["constant"] = CVParameter(2, -255, 255, 1.0)
elif backend == "canny":
- self.params[category]["threshold1"] = CVParameter(100.0, 0.0, None, 50.0)
- self.params[category]["threshold2"] = CVParameter(1000.0, 0.0, None, 500.0)
-
- def configure_backend(self, backend: str = None, category: str = "contour",
- reset: bool = False) -> None:
+ self.params[category]["threshold1"] = CVParameter(
+ 100.0, 0.0, None, 50.0
+ )
+ self.params[category]["threshold2"] = CVParameter(
+ 1000.0, 0.0, None, 500.0
+ )
+
+ def configure_backend(
+ self, backend: str = None, category: str = "contour", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __configure(self, threshold_filter: str = None, reset: bool = True, **kwargs: dict[str, type]) -> None:
+ def __configure(
+ self,
+ threshold_filter: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
self.__configure_backend(category="contour", reset=reset)
self.__configure_backend(threshold_filter, "threshold")
- def configure(self, threshold_filter: str = None, reset: bool = True, **kwargs: dict[str, type]) -> None:
+ def configure(
+ self,
+ threshold_filter: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
"""
+ Generate configuration dictionary for all backends.
+
Custom implementation of the base method.
:param threshold_filter: name of a preselected backend
@@ -662,6 +782,8 @@ def configure(self, threshold_filter: str = None, reset: bool = True, **kwargs:
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
:param needle: target image to search for
@@ -704,10 +826,13 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
for j, ncontour in enumerate(needle_contours):
if cv2.contourArea(ncontour) < self.params["contour"]["minArea"].value:
continue
- distances[i, j] = cv2.matchShapes(hcontour, ncontour, self.params["contour"]["contoursMatch"].value, 0)
+ distances[i, j] = cv2.matchShapes(
+ hcontour, ncontour, self.params["contour"]["contoursMatch"].value, 0
+ )
assert distances[i, j] >= 0.0
from .match import Match
+
matches = []
nx, ny, nw, nh = cv2.boundingRect(numpy.concatenate(needle_contours, axis=0))
while True:
@@ -719,41 +844,78 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
# we don't allow collapsing into the same needle contour, i.e.
# the map from the needle to the haystack contours is injective
# -> so here we cross the entire row rather than one value in it
- distances[index[0][0], :] = 1.1 # like this works even for similarity 0.0
+ distances[index[0][0], :] = (
+ 1.1 # like this works even for similarity 0.0
+ )
matching_haystack_contours.append(haystack_contours[index[0][0]])
average_distance = numpy.average(matching_haystack_distances)
required_distance = 1.0 - self.params["find"]["similarity"].value
- logging.debug("Average distance to next needle shape is %s of max allowed %s",
- average_distance, required_distance)
+ logging.debug(
+ "Average distance to next needle shape is %s of max allowed %s",
+ average_distance,
+ required_distance,
+ )
if average_distance > required_distance:
break
else:
shape = numpy.concatenate(matching_haystack_contours, axis=0)
x, y, w, h = cv2.boundingRect(shape)
# calculate needle upleft and downright points to return its (0,0) location
- needle_upleft = (max(int((x-nx)*float(w)/nw), 0), max(int((y-ny)*float(h)/nh), 0))
- needle_downright = (min(int(needle_upleft[0]+needle.width*float(w)/nw), haystack.width),
- min(int(needle_upleft[1]+needle.height*float(h)/nh), haystack.height))
- needle_center_offset = (needle.center_offset.x*float(w)/nw,
- needle.center_offset.y*float(h)/nh)
- cv2.rectangle(self.imglog.hotmaps[-1], needle_upleft, needle_downright, (0, 0, 0), 2)
- cv2.rectangle(self.imglog.hotmaps[-1], needle_upleft, needle_downright, (255, 255, 255), 1)
+ needle_upleft = (
+ max(int((x - nx) * float(w) / nw), 0),
+ max(int((y - ny) * float(h) / nh), 0),
+ )
+ needle_downright = (
+ min(
+ int(needle_upleft[0] + needle.width * float(w) / nw),
+ haystack.width,
+ ),
+ min(
+ int(needle_upleft[1] + needle.height * float(h) / nh),
+ haystack.height,
+ ),
+ )
+ needle_center_offset = (
+ needle.center_offset.x * float(w) / nw,
+ needle.center_offset.y * float(h) / nh,
+ )
+ cv2.rectangle(
+ self.imglog.hotmaps[-1],
+ needle_upleft,
+ needle_downright,
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ self.imglog.hotmaps[-1],
+ needle_upleft,
+ needle_downright,
+ (255, 255, 255),
+ 1,
+ )
# NOTE: to extract the region of interest just do:
# roi = thresh_haystack[y:y+h,x:x+w]
similarity = 1.0 - average_distance
self.imglog.similarities.append(similarity)
self.imglog.locations.append(needle_upleft)
- matches.append(Match(needle_upleft[0], needle_upleft[1],
- needle_downright[0] - needle_upleft[0],
- needle_downright[1] - needle_upleft[1],
- needle_center_offset[0], needle_center_offset[1],
- similarity))
+ matches.append(
+ Match(
+ needle_upleft[0],
+ needle_upleft[1],
+ needle_downright[0] - needle_upleft[0],
+ needle_downright[1] - needle_upleft[1],
+ needle_center_offset[0],
+ needle_center_offset[1],
+ similarity,
+ )
+ )
self.imglog.log(30)
return matches
def _binarize_image(self, image: "Matlike", log: bool = False) -> "Matlike":
import cv2
+
        # blur first in order to avoid unwanted edges caused by noise
blurSize = self.params["threshold"]["blurKernelSize"].value
blurDeviation = self.params["threshold"]["blurKernelSigma"].value
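+        # NOTE: cv2.medianBlur and cv2.GaussianBlur below expect an odd kernel size,
+        # so blurKernelSize is assumed to be calibrated to odd values only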
@@ -763,37 +925,50 @@ def _binarize_image(self, image: "Matlike", log: bool = False) -> "Matlike":
elif self.params["threshold"]["blurType"].value == 2:
blur_image = cv2.medianBlur(gray_image, blurSize)
elif self.params["threshold"]["blurType"].value == 3:
- blur_image = cv2.GaussianBlur(gray_image, (blurSize, blurSize), blurDeviation)
+ blur_image = cv2.GaussianBlur(
+ gray_image, (blurSize, blurSize), blurDeviation
+ )
elif self.params["threshold"]["blurType"].value == 4:
blur_image = gray_image
# second stage: thresholding
if self.params["threshold"]["backend"] == "normal":
- _, thresh_image = cv2.threshold(blur_image,
- self.params["threshold"]["thresholdValue"].value,
- self.params["threshold"]["thresholdMax"].value,
- self.params["threshold"]["thresholdType"].value)
+ _, thresh_image = cv2.threshold(
+ blur_image,
+ self.params["threshold"]["thresholdValue"].value,
+ self.params["threshold"]["thresholdMax"].value,
+ self.params["threshold"]["thresholdType"].value,
+ )
elif self.params["threshold"]["backend"] == "adaptive":
- thresh_image = cv2.adaptiveThreshold(blur_image,
- self.params["threshold"]["thresholdMax"].value,
- self.params["threshold"]["adaptiveMethod"].value,
- self.params["threshold"]["thresholdType"].value,
- self.params["threshold"]["blockSize"].value,
- self.params["threshold"]["constant"].value)
+ thresh_image = cv2.adaptiveThreshold(
+ blur_image,
+ self.params["threshold"]["thresholdMax"].value,
+ self.params["threshold"]["adaptiveMethod"].value,
+ self.params["threshold"]["thresholdType"].value,
+ self.params["threshold"]["blockSize"].value,
+ self.params["threshold"]["constant"].value,
+ )
elif self.params["threshold"]["backend"] == "canny":
- thresh_image = cv2.Canny(blur_image,
- self.params["threshold"]["threshold1"].value,
- self.params["threshold"]["threshold2"].value)
+ thresh_image = cv2.Canny(
+ blur_image,
+ self.params["threshold"]["threshold1"].value,
+ self.params["threshold"]["threshold2"].value,
+ )
if log:
self.imglog.hotmaps.append(thresh_image)
return thresh_image
- def _extract_contours(self, countours_image: "Matlike", log: bool = False) -> "list[Matlike]":
+ def _extract_contours(
+ self, countours_image: "Matlike", log: bool = False
+ ) -> "list[Matlike]":
import cv2
- rargs = cv2.findContours(countours_image,
- self.params["contour"]["retrievalMode"].value,
- self.params["contour"]["approxMethod"].value)
+
+ rargs = cv2.findContours(
+ countours_image,
+ self.params["contour"]["retrievalMode"].value,
+ self.params["contour"]["approxMethod"].value,
+ )
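+        # OpenCV 3.x returns (image, contours, hierarchy) while 2.x and 4.x return
+        # only (contours, hierarchy), hence the branching on the tuple length below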
if len(rargs) == 3:
_, contours, hierarchy = rargs
else:
@@ -806,6 +981,8 @@ def _extract_contours(self, countours_image: "Matlike", log: bool = False) -> "l
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -819,16 +996,26 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
-
- self.imglog.dump_hotmap("imglog%s-3hotmap-1threshold.png" % self.imglog.printable_step,
- self.imglog.hotmaps[0])
- self.imglog.dump_hotmap("imglog%s-3hotmap-2contours.png" % self.imglog.printable_step,
- self.imglog.hotmaps[1])
-
- similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
- self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
- self.imglog.hotmaps[-1])
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
+
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-1threshold.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[0],
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-2contours.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[1],
+ )
+
+ similarity = (
+ self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
+ self.imglog.hotmaps[-1],
+ )
self.imglog.clear()
ImageLogger.step += 1
@@ -844,28 +1031,32 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
# available and currently fully compatible methods
self.categories["template"] = "template_matchers"
        # we only use the normalized versions of "sqdiff", "ccorr", and "ccoeff"
- self.algorithms["template_matchers"] = ("sqdiff_normed", "ccorr_normed", "ccoeff_normed")
+ self.algorithms["template_matchers"] = (
+ "sqdiff_normed",
+ "ccorr_normed",
+ "ccoeff_normed",
+ )
# additional preparation (no synchronization available)
if configure:
self.__configure_backend(reset=True)
- def __configure_backend(self, backend: str = None, category: str = "template",
- reset: bool = False) -> None:
- """
- Custom implementation of the base method.
-
- See base method for details.
- """
+ def __configure_backend(
+ self, backend: str = None, category: str = "template", reset: bool = False
+ ) -> None:
if category != "template":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(TemplateFinder, self).configure_backend("template", reset=True)
if backend is None:
backend = GlobalConfig.template_match_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
@@ -873,9 +1064,12 @@ def __configure_backend(self, backend: str = None, category: str = "template",
self.params[category]["nocolor"] = CVParameter(False)
log.log(9, "%s %s\n", category, self.params[category])
- def configure_backend(self, backend: str = None, category: str = "template",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "template", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
@@ -884,6 +1078,8 @@ def configure_backend(self, backend: str = None, category: str = "template",
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
        :param needle: target image to search for
@@ -899,14 +1095,25 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
self.imglog.haystack = haystack
self.imglog.dump_matched_images()
- if self.params["template"]["backend"] not in self.algorithms["template_matchers"]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (self.params["template"]["backend"],
- self.algorithms["template_matchers"]))
+ if (
+ self.params["template"]["backend"]
+ not in self.algorithms["template_matchers"]
+ ):
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s"
+ % (
+ self.params["template"]["backend"],
+ self.algorithms["template_matchers"],
+ )
+ )
match_template = self.params["template"]["backend"]
no_color = self.params["template"]["nocolor"].value
- log.debug("Performing %s template matching %s color",
- match_template, "without" if no_color else "with")
+ log.debug(
+ "Performing %s template matching %s color",
+ match_template,
+ "without" if no_color else "with",
+ )
result = self._match_template(needle, haystack, no_color, match_template)
if result is None:
log.warning("OpenCV's template matching returned no result")
@@ -917,6 +1124,7 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
import cv2
import numpy
+
universal_hotmap = result * 255.0
final_hotmap = numpy.array(self.imglog.haystack.pil_image)
if self.params["template"]["nocolor"].value:
@@ -925,21 +1133,31 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
# extract maxima once for each needle size region
similarity = self.params["find"]["similarity"].value
from .match import Match
+
matches = []
while True:
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
# rectify to the [0,1] interval to avoid negative values in some methods
maxVal = min(max(maxVal, 0.0), 1.0)
- log.debug('Next best match with value %s (similarity %s) and location (x,y) %s',
- str(maxVal), similarity, str(maxLoc))
+ log.debug(
+ "Next best match with value %s (similarity %s) and location (x,y) %s",
+ str(maxVal),
+ similarity,
+ str(maxLoc),
+ )
if maxVal < similarity:
if len(matches) == 0:
self.imglog.similarities.append(maxVal)
self.imglog.locations.append(maxLoc)
current_hotmap = numpy.copy(universal_hotmap)
- cv2.circle(current_hotmap, (maxLoc[0], maxLoc[1]), int(30*maxVal), (255, 255, 255))
+ cv2.circle(
+ current_hotmap,
+ (maxLoc[0], maxLoc[1]),
+ int(30 * maxVal),
+ (255, 255, 255),
+ )
self.imglog.hotmaps.append(current_hotmap)
self.imglog.hotmaps.append(final_hotmap)
@@ -949,12 +1167,17 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
self.imglog.similarities.append(maxVal)
self.imglog.locations.append(maxLoc)
current_hotmap = numpy.copy(universal_hotmap)
- cv2.circle(current_hotmap, (maxLoc[0], maxLoc[1]), int(30*maxVal), (255, 255, 255))
+ cv2.circle(
+ current_hotmap,
+ (maxLoc[0], maxLoc[1]),
+ int(30 * maxVal),
+ (255, 255, 255),
+ )
x, y = maxLoc
w, h = needle.width, needle.height
dx, dy = needle.center_offset.x, needle.center_offset.y
- cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (255, 255, 255), 1)
+ cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (255, 255, 255), 1)
self.imglog.hotmaps.append(current_hotmap)
log.debug("Next best match is acceptable")
matches.append(Match(x, y, w, h, dx, dy, maxVal))
@@ -970,10 +1193,22 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
match_y1 = min(maxLoc[1] + int(0.5 * needle.height), res_h)
# log this only if performing deep internal debugging
- log.log(9, "Wipe image matches in x [%s, %s]/[%s, %s]",
- match_x0, match_x1, 0, res_w)
- log.log(9, "Wipe image matches in y [%s, %s]/[%s, %s]",
- match_y0, match_y1, 0, res_h)
+ log.log(
+ 9,
+ "Wipe image matches in x [%s, %s]/[%s, %s]",
+ match_x0,
+ match_x1,
+ 0,
+ res_w,
+ )
+ log.log(
+ 9,
+ "Wipe image matches in y [%s, %s]/[%s, %s]",
+ match_y0,
+ match_y1,
+ 0,
+ res_h,
+ )
# clean found image to look for next safe distance match
result[match_y0:match_y1, match_x0:match_x1] = 0.0
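+            # zeroing out a needle-sized neighborhood acts as non-maximum suppression
+            # so that the next cv2.minMaxLoc call picks a different image region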
@@ -985,8 +1220,9 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
return matches
- def _match_template(self, needle: "Image", haystack: "Image", nocolor: str,
- method: str) -> "Matlike | None":
+ def _match_template(
+ self, needle: "Image", haystack: "Image", nocolor: str, method: str
+ ) -> "Matlike | None":
"""
EXTRA DOCSTRING: Template matching backend - wrapper.
@@ -995,15 +1231,26 @@ def _match_template(self, needle: "Image", haystack: "Image", nocolor: str,
"""
# sanity check: needle size must be smaller than haystack
if haystack.width < needle.width or haystack.height < needle.height:
- log.warning("The size of the searched image (%sx%s) does not fit the search region (%sx%s)",
- needle.width, needle.height, haystack.width, haystack.height)
+ log.warning(
+ "The size of the searched image (%sx%s) does not fit the search region (%sx%s)",
+ needle.width,
+ needle.height,
+ haystack.width,
+ haystack.height,
+ )
return None
import cv2
import numpy
- methods = {"sqdiff": cv2.TM_SQDIFF, "sqdiff_normed": cv2.TM_SQDIFF_NORMED,
- "ccorr": cv2.TM_CCORR, "ccorr_normed": cv2.TM_CCORR_NORMED,
- "ccoeff": cv2.TM_CCOEFF, "ccoeff_normed": cv2.TM_CCOEFF_NORMED}
+
+ methods = {
+ "sqdiff": cv2.TM_SQDIFF,
+ "sqdiff_normed": cv2.TM_SQDIFF_NORMED,
+ "ccorr": cv2.TM_CCORR,
+ "ccorr_normed": cv2.TM_CCORR_NORMED,
+ "ccoeff": cv2.TM_CCOEFF,
+ "ccoeff_normed": cv2.TM_CCOEFF_NORMED,
+ }
if method not in methods.keys():
raise UnsupportedBackendError("Supported algorithms are in conflict")
@@ -1020,6 +1267,8 @@ def _match_template(self, needle: "Image", haystack: "Image", nocolor: str,
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -1033,16 +1282,25 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
for i in range(len(self.imglog.similarities)):
- name = "imglog%s-3hotmap-%stemplate-%s.png" % (self.imglog.printable_step,
- i + 1, self.imglog.similarities[i])
+ name = "imglog%s-3hotmap-%stemplate-%s.png" % (
+ self.imglog.printable_step,
+ i + 1,
+ self.imglog.similarities[i],
+ )
self.imglog.dump_hotmap(name, self.imglog.hotmaps[i])
- similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
- self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
- self.imglog.hotmaps[-1])
+ similarity = (
+ self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
+ self.imglog.hotmaps[-1],
+ )
self.imglog.clear()
ImageLogger.step += 1
@@ -1066,11 +1324,23 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
self.categories["fextract"] = "feature_extractors"
self.categories["fmatch"] = "feature_matchers"
self.algorithms["feature_projectors"] = ("mixed",)
- self.algorithms["feature_matchers"] = ("BruteForce", "BruteForce-L1", "BruteForce-Hamming",
- "BruteForce-Hamming(2)")
- self.algorithms["feature_detectors"] = ("ORB", "BRISK", "KAZE", "AKAZE", "MSER",
- "AgastFeatureDetector", "FastFeatureDetector", "GFTTDetector",
- "SimpleBlobDetector")
+ self.algorithms["feature_matchers"] = (
+ "BruteForce",
+ "BruteForce-L1",
+ "BruteForce-Hamming",
+ "BruteForce-Hamming(2)",
+ )
+ self.algorithms["feature_detectors"] = (
+ "ORB",
+ "BRISK",
+ "KAZE",
+ "AKAZE",
+ "MSER",
+ "AgastFeatureDetector",
+ "FastFeatureDetector",
+ "GFTTDetector",
+ "SimpleBlobDetector",
+ )
# TODO: we could also support "StereoSGBM" but it needs initialization arguments
        # BUG: we get an internal error when using KAZE/AKAZE even though they should be possible
self.algorithms["feature_extractors"] = ("ORB", "BRISK")
@@ -1086,10 +1356,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
self.__synchronize(reset=False)
- def __configure_backend(self, backend: str = None, category: str = "feature",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "feature", reset: bool = False
+ ) -> None:
if category not in ["feature", "fdetect", "fextract", "fmatch"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(FeatureFinder, self).configure_backend("feature", reset=True)
if category == "feature" and backend is None:
@@ -1101,8 +1374,10 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
elif category == "fmatch" and backend is None:
backend = GlobalConfig.feature_match_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
@@ -1110,22 +1385,30 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
if category == "feature":
# 0 for homography, 1 for fundamental matrix
- self.params[category]["projectionMethod"] = CVParameter(0, 0, 1, enumerated=True)
- self.params[category]["ransacReprojThreshold"] = CVParameter(0.0, 0.0, 200.0, 50.0)
+ self.params[category]["projectionMethod"] = CVParameter(
+ 0, 0, 1, enumerated=True
+ )
+ self.params[category]["ransacReprojThreshold"] = CVParameter(
+ 0.0, 0.0, 200.0, 50.0
+ )
self.params[category]["minDetectedFeatures"] = CVParameter(4, 1, None)
self.params[category]["minMatchedFeatures"] = CVParameter(4, 1, None)
# 0 for matched/detected ratio, 1 for projected/matched ratio
- self.params[category]["similarityRatio"] = CVParameter(1, 0, 1, enumerated=True)
+ self.params[category]["similarityRatio"] = CVParameter(
+ 1, 0, 1, enumerated=True
+ )
elif category == "fdetect":
self.params[category]["nzoom"] = CVParameter(1.0, 1.0, 10.0, 2.5)
self.params[category]["hzoom"] = CVParameter(1.0, 1.0, 10.0, 2.5)
import cv2
+
feature_detector_create = getattr(cv2, "%s_create" % backend)
backend_obj = feature_detector_create()
elif category == "fextract":
import cv2
+
descriptor_extractor_create = getattr(cv2, "%s_create" % backend)
backend_obj = descriptor_extractor_create()
@@ -1134,10 +1417,14 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
self.params[category]["refinements"] = CVParameter(50, 1, None)
self.params[category]["recalc_interval"] = CVParameter(10, 1, None)
self.params[category]["variants_k"] = CVParameter(100, 1, None)
- self.params[category]["variants_ratio"] = CVParameter(0.33, 0.0001, 1.0, 0.25)
+ self.params[category]["variants_ratio"] = CVParameter(
+ 0.33, 0.0001, 1.0, 0.25
+ )
return
else:
- self.params[category]["ratioThreshold"] = CVParameter(0.65, 0.0, 1.0, 0.25, 0.01)
+ self.params[category]["ratioThreshold"] = CVParameter(
+ 0.65, 0.0, 1.0, 0.25, 0.01
+ )
self.params[category]["ratioTest"] = CVParameter(False)
self.params[category]["symmetryTest"] = CVParameter(False)
@@ -1147,6 +1434,7 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
else:
import cv2
+
            # NOTE: descriptor matcher creation is kept the old way while feature
            # detection and extraction are not - an example of OpenCV's untidy maintenance
backend_obj = cv2.DescriptorMatcher_create(backend)
@@ -1176,7 +1464,9 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
elif category in ("fdetect", "fextract") and param == "WTA_K":
self.params[category][param] = CVParameter(val, 2, 4, 1.0)
elif category in ("fdetect", "fextract") and param == "ScaleFactor":
- self.params[category][param] = CVParameter(val, 1.01, 2.0, 0.25, 0.05)
+ self.params[category][param] = CVParameter(
+ val, 1.01, 2.0, 0.25, 0.05
+ )
elif category in ("fdetect", "fextract") and param == "NLevels":
self.params[category][param] = CVParameter(val, 1, 100, 25, 0.5)
elif category in ("fdetect", "fextract") and param == "NLevels":
@@ -1187,9 +1477,12 @@ def __configure_backend(self, backend: str = None, category: str = "feature",
self.params[category][param] = CVParameter(val)
log.log(9, "%s=%s", param, val)
- def configure_backend(self, backend: str = None, category: str = "feature",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "feature", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
Some relevant parameters are:
@@ -1208,18 +1501,30 @@ def configure_backend(self, backend: str = None, category: str = "feature",
"""
self.__configure_backend(backend, category, reset)
- def __configure(self, feature_detect: str = None, feature_extract: str = None,
- feature_match: str = None, reset: bool = True,
- **kwargs: dict[str, type]) -> None:
+ def __configure(
+ self,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
self.__configure_backend(category="feature", reset=reset)
self.__configure_backend(feature_detect, "fdetect")
self.__configure_backend(feature_extract, "fextract")
self.__configure_backend(feature_match, "fmatch")
- def configure(self, feature_detect: str = None, feature_extract: str = None,
- feature_match: str = None, reset: bool = True,
- **kwargs: dict[str, type]) -> None:
+ def configure(
+ self,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
"""
+ Generate configuration dictionary for all backends.
+
Custom implementation of the base method.
:param feature_detect: name of a preselected backend
@@ -1229,14 +1534,19 @@ def configure(self, feature_detect: str = None, feature_extract: str = None,
"""
self.__configure(feature_detect, feature_extract, feature_match, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "feature",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "feature", reset: bool = False
+ ) -> None:
if category not in ["feature", "fdetect", "fextract", "fmatch"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(FeatureFinder, self).synchronize_backend("feature", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
backend = self.params[category]["backend"]
backend_obj = None
@@ -1245,14 +1555,17 @@ def __synchronize_backend(self, backend: str = None, category: str = "feature",
return
elif category == "fdetect":
import cv2
+
feature_detector_create = getattr(cv2, "%s_create" % backend)
backend_obj = feature_detector_create()
elif category == "fextract":
import cv2
+
descriptor_extractor_create = getattr(cv2, "%s_create" % backend)
backend_obj = descriptor_extractor_create()
elif category == "fmatch":
import cv2
+
            # NOTE: descriptor matcher creation is kept the old way while feature
            # detection and extraction are not - an example of OpenCV's untidy maintenance
backend_obj = cv2.DescriptorMatcher_create(backend)
@@ -1284,25 +1597,40 @@ def __synchronize_backend(self, backend: str = None, category: str = "feature",
elif category == "fmatch":
self.matcher = backend_obj
- def synchronize_backend(self, backend: str = None, category: str = "feature",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "feature", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
"""
self.__synchronize_backend(backend, category, reset)
- def __synchronize(self, feature_detect: str = None, feature_extract: str = None,
- feature_match: str = None, reset: bool = True) -> None:
+ def __synchronize(
+ self,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ ) -> None:
self.__synchronize_backend(category="feature", reset=reset)
self.__synchronize_backend(feature_detect, "fdetect")
self.__synchronize_backend(feature_extract, "fextract")
self.__synchronize_backend(feature_match, "fmatch")
- def synchronize(self, feature_detect: str = None, feature_extract: str = None,
- feature_match: str = None, reset: bool = True) -> None:
+ def synchronize(
+ self,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ ) -> None:
"""
+ Synchronize all backends with the current configuration dictionary.
+
Custom implementation of the base method.
:param feature_detect: name of a preselected backend
@@ -1314,6 +1642,8 @@ def synchronize(self, feature_detect: str = None, feature_extract: str = None,
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
        :param needle: target image to search for
@@ -1334,6 +1664,7 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
import cv2
import numpy
+
ngray = cv2.cvtColor(numpy.array(needle.pil_image), cv2.COLOR_RGB2GRAY)
hgray = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY)
self.imglog.hotmaps.append(numpy.array(haystack.pil_image))
@@ -1343,14 +1674,21 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
# project more points for debugging purposes and image logging
npoints = []
- npoints.extend([(0, 0), (needle.width, 0), (0, needle.height),
- (needle.width, needle.height)])
+ npoints.extend(
+ [
+ (0, 0),
+ (needle.width, 0),
+ (0, needle.height),
+ (needle.width, needle.height),
+ ]
+ )
npoints.append((needle.width / 2, needle.height / 2))
similarity = self.params["find"]["similarity"].value
hpoints = self._project_features(npoints, ngray, hgray, similarity)
if hpoints is not None and len(hpoints) > 0:
from .match import Match
+
x, y = hpoints[0]
w, h = tuple(numpy.abs(numpy.subtract(hpoints[3], hpoints[0])))
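+            # hpoints[0] and hpoints[3] are the projected top-left and bottom-right
+            # needle corners, so their difference gives the matched region size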
# TODO: projecting offset requires more effort
@@ -1360,8 +1698,13 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
self.imglog.log(40)
return []
- def _project_features(self, locations_in_needle: list[tuple[int, int]], ngray: "Matlike",
- hgray: "Matlike", similarity: float) -> list[tuple[int, int]] | None:
+ def _project_features(
+ self,
+ locations_in_needle: list[tuple[int, int]],
+ ngray: "Matlike",
+ hgray: "Matlike",
+ similarity: float,
+ ) -> list[tuple[int, int]] | None:
"""
EXTRA DOCSTRING: Feature matching backend - wrapper.
@@ -1372,45 +1715,70 @@ def _project_features(self, locations_in_needle: list[tuple[int, int]], ngray: "
self.imglog.locations.append((0, 0))
self.imglog.similarities.append(0.0)
- log.debug("Performing %s feature matching (no color)",
- "-".join([self.params["fdetect"]["backend"],
- self.params["fextract"]["backend"],
- self.params["fmatch"]["backend"]]))
- nkp, ndc, hkp, hdc = self._detect_features(ngray, hgray,
- self.params["fdetect"]["backend"],
- self.params["fextract"]["backend"])
+ log.debug(
+ "Performing %s feature matching (no color)",
+ "-".join(
+ [
+ self.params["fdetect"]["backend"],
+ self.params["fextract"]["backend"],
+ self.params["fmatch"]["backend"],
+ ]
+ ),
+ )
+ nkp, ndc, hkp, hdc = self._detect_features(
+ ngray,
+ hgray,
+ self.params["fdetect"]["backend"],
+ self.params["fextract"]["backend"],
+ )
min_features = self.params["feature"]["minDetectedFeatures"].value
if len(nkp) < min_features or len(hkp) < min_features:
- log.debug("No acceptable best match after feature detection: "
- "only %s\\%s needle and %s\\%s haystack features detected",
- len(nkp), min_features, len(hkp), min_features)
+ log.debug(
+ "No acceptable best match after feature detection: "
+ "only %s\\%s needle and %s\\%s haystack features detected",
+ len(nkp),
+ min_features,
+ len(hkp),
+ min_features,
+ )
return None
- mnkp, mhkp = self._match_features(nkp, ndc, hkp, hdc,
- self.params["fmatch"]["backend"])
+ mnkp, mhkp = self._match_features(
+ nkp, ndc, hkp, hdc, self.params["fmatch"]["backend"]
+ )
min_features = self.params["feature"]["minMatchedFeatures"].value
if self.imglog.similarities[-1] < similarity or len(mnkp) < min_features:
- log.debug("No acceptable best match after feature matching:\n"
- "- matched features %s of %s required\n"
- "- best match similarity %s of %s required",
- len(mnkp), min_features,
- self.imglog.similarities[-1], similarity)
+ log.debug(
+ "No acceptable best match after feature matching:\n"
+ "- matched features %s of %s required\n"
+ "- best match similarity %s of %s required",
+ len(mnkp),
+ min_features,
+ self.imglog.similarities[-1],
+ similarity,
+ )
return None
locations_in_haystack = self._project_locations(locations_in_needle, mnkp, mhkp)
if self.imglog.similarities[-1] < similarity:
- log.debug("No acceptable best match after RANSAC projection: "
- "best match similarity %s is less than required %s",
- self.imglog.similarities[-1], similarity)
+ log.debug(
+ "No acceptable best match after RANSAC projection: "
+ "best match similarity %s is less than required %s",
+ self.imglog.similarities[-1],
+ similarity,
+ )
return None
else:
- self._log_features(30, self.imglog.locations, self.imglog.hotmaps[-1], 3, 0, 0, 255)
+ self._log_features(
+ 30, self.imglog.locations, self.imglog.hotmaps[-1], 3, 0, 0, 255
+ )
return locations_in_haystack
- def _detect_features(self, ngray: int, hgray: int, detect: str,
- extract: str) -> tuple[list[Any], list[Any], list[Any], list[Any]]:
+ def _detect_features(
+ self, ngray: int, hgray: int, detect: str, extract: str
+ ) -> tuple[list[Any], list[Any], list[Any], list[Any]]:
"""
EXTRA DOCSTRING: Feature matching backend - detection/extraction stage (1).
@@ -1421,6 +1789,7 @@ def _detect_features(self, ngray: int, hgray: int, detect: str,
# zoom in if explicitly set
import cv2
+
if nfactor > 1.0:
log.debug("Zooming x%i needle", nfactor)
ngray = cv2.resize(ngray, None, fx=nfactor, fy=nfactor)
@@ -1429,8 +1798,10 @@ def _detect_features(self, ngray: int, hgray: int, detect: str,
hgray = cv2.resize(hgray, None, fx=hfactor, fy=hfactor)
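+        # NOTE: keypoint coordinates are divided back by these zoom factors below
+        # so that all returned features refer to the original image sizes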
# include only methods tested for compatibility
- if (detect in self.algorithms["feature_detectors"]
- and extract in self.algorithms["feature_extractors"]):
+ if (
+ detect in self.algorithms["feature_detectors"]
+ and extract in self.algorithms["feature_extractors"]
+ ):
self.synchronize_backend(category="fdetect")
self.synchronize_backend(category="fextract")
@@ -1443,36 +1814,54 @@ def _detect_features(self, ngray: int, hgray: int, detect: str,
(hkeypoints, hdescriptors) = self.extractor.compute(hgray, hkeypoints)
else:
- raise UnsupportedBackendError("Feature detector %s is not among the supported"
- "ones %s" % (detect, self.algorithms[self.categories["fdetect"]]))
+ raise UnsupportedBackendError(
+                "Feature detector %s is not among the supported "
+ "ones %s" % (detect, self.algorithms[self.categories["fdetect"]])
+ )
# reduce keypoint coordinates to the original image size
for nkeypoint in nkeypoints:
- nkeypoint.pt = (int(nkeypoint.pt[0] / nfactor),
- int(nkeypoint.pt[1] / nfactor))
+ nkeypoint.pt = (
+ int(nkeypoint.pt[0] / nfactor),
+ int(nkeypoint.pt[1] / nfactor),
+ )
for hkeypoint in hkeypoints:
- hkeypoint.pt = (int(hkeypoint.pt[0] / hfactor),
- int(hkeypoint.pt[1] / hfactor))
-
- log.debug("Detected %s keypoints in needle and %s in haystack",
- len(nkeypoints), len(hkeypoints))
+ hkeypoint.pt = (
+ int(hkeypoint.pt[0] / hfactor),
+ int(hkeypoint.pt[1] / hfactor),
+ )
+
+ log.debug(
+ "Detected %s keypoints in needle and %s in haystack",
+ len(nkeypoints),
+ len(hkeypoints),
+ )
hkp_locations = [hkp.pt for hkp in hkeypoints]
self._log_features(10, hkp_locations, self.imglog.hotmaps[-4], 3, 255, 0, 0)
return (nkeypoints, ndescriptors, hkeypoints, hdescriptors)
- def _match_features(self, nkeypoints: str, ndescriptors: str,
- hkeypoints: str, hdescriptors: str,
- match: str) -> tuple[list[Any], list[Any]]:
+ def _match_features(
+ self,
+ nkeypoints: str,
+ ndescriptors: str,
+ hkeypoints: str,
+ hdescriptors: str,
+ match: str,
+ ) -> tuple[list[Any], list[Any]]:
"""
EXTRA DOCSTRING: Feature matching backend - matching stage (2).
Match two sets of keypoints based on their descriptors.
"""
+
def ratio_test(matches: list[Any]) -> list[Any]:
"""
- The ratio test checks the first and second best match. If their
- ratio is close to 1.0, there are both good candidates for the
+ Perform a ratio test.
+
+ The ratio test checks the first and second best match.
+
+            If their ratio is close to 1.0, they are both good candidates for the
            match and the probability of error when choosing one is greater.
            Therefore these matches are ignored and thus only matches of
            greater probability are returned.
@@ -1485,7 +1874,10 @@ def ratio_test(matches: list[Any]) -> list[Any]:
smooth_dist1 = m[0].distance + 0.0000001
smooth_dist2 = m[1].distance + 0.0000001
- if smooth_dist1 / smooth_dist2 < self.params["fmatch"]["ratioThreshold"].value:
+ if (
+ smooth_dist1 / smooth_dist2
+ < self.params["fmatch"]["ratioThreshold"].value
+ ):
matches2.append(m[0])
else:
matches2.append(m[0])
@@ -1495,13 +1887,16 @@ def ratio_test(matches: list[Any]) -> list[Any]:
def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]:
"""
- Refines the matches with a symmetry test which extracts
- only the matches in agreement with both the haystack and needle
- sets of keypoints. The two keypoints must be best feature
- matching of each other to ensure the error by accepting the
- match is not too large.
+ Perform a symmetry test.
+
+            The symmetry test refines the matches by keeping only those that are
+            in agreement between the haystack and needle sets of keypoints.
+
+            The two keypoints must be the best feature match of each other
+            to ensure that the error from accepting the match is not too large.
"""
import cv2
+
matches2 = []
for nm in nmatches:
for hm in hmatches:
@@ -1519,17 +1914,23 @@ def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]:
# build matcher and match feature vectors
self.synchronize_backend(category="fmatch")
else:
- raise UnsupportedBackendError("Feature detector %s is not among the supported"
- "ones %s" % (match, self.algorithms[self.categories["fmatch"]]))
+ raise UnsupportedBackendError(
+                "Feature matcher %s is not among the supported "
+ "ones %s" % (match, self.algorithms[self.categories["fmatch"]])
+ )
# find and filter matches through tests
if match == "in-house-region":
- matches = self.matcher.regionMatch(ndescriptors, hdescriptors,
- nkeypoints, hkeypoints,
- self.params["fmatch"]["refinements"].value,
- self.params["fmatch"]["recalc_interval"].value,
- self.params["fmatch"]["variants_k"].value,
- self.params["fmatch"]["variants_ratio"].value)
+ matches = self.matcher.regionMatch(
+ ndescriptors,
+ hdescriptors,
+ nkeypoints,
+ hkeypoints,
+ self.params["fmatch"]["refinements"].value,
+ self.params["fmatch"]["recalc_interval"].value,
+ self.params["fmatch"]["variants_k"].value,
+ self.params["fmatch"]["variants_ratio"].value,
+ )
else:
if self.params["fmatch"]["ratioTest"].value:
matches = self.matcher.knnMatch(ndescriptors, hdescriptors, 2)
@@ -1563,13 +1964,18 @@ def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]:
# update the current achieved similarity if matching similarity is used:
# won't be updated anymore if self.params["feature"]["similarityRatio"].value == 0
self.imglog.similarities[-1] = match_similarity
- log.log(9, "%s\\%s -> %f", len(match_nkeypoints),
- len(nkeypoints), match_similarity)
+ log.log(
+ 9, "%s\\%s -> %f", len(match_nkeypoints), len(nkeypoints), match_similarity
+ )
return (match_nkeypoints, match_hkeypoints)
- def _project_locations(self, locations_in_needle: list[tuple[int, int]], mnkp: list[Any],
- mhkp: list[Any]) -> list[tuple[int, int]]:
+ def _project_locations(
+ self,
+ locations_in_needle: list[tuple[int, int]],
+ mnkp: list[Any],
+ mhkp: list[Any],
+ ) -> list[tuple[int, int]]:
"""
EXTRA DOCSTRING: Feature matching backend - projecting stage (3).
@@ -1594,20 +2000,29 @@ def _project_locations(self, locations_in_needle: list[tuple[int, int]], mnkp: l
import cv2
import numpy
+
# homography and fundamental matrix as options - homography is considered only
# for rotation but currently gives better results than the fundamental matrix
if self.params["feature"]["projectionMethod"].value == 0:
- H, mask = cv2.findHomography(numpy.array([kp.pt for kp in mnkp]),
- numpy.array([kp.pt for kp in mhkp]), cv2.RANSAC,
- self.params["feature"]["ransacReprojThreshold"].value)
+ H, mask = cv2.findHomography(
+ numpy.array([kp.pt for kp in mnkp]),
+ numpy.array([kp.pt for kp in mhkp]),
+ cv2.RANSAC,
+ self.params["feature"]["ransacReprojThreshold"].value,
+ )
elif self.params["feature"]["projectionMethod"].value == 1:
- H, mask = cv2.findFundamentalMat(numpy.array([kp.pt for kp in mnkp]),
- numpy.array([kp.pt for kp in mhkp]),
- method=cv2.RANSAC, param1=10.0,
- param2=0.9)
+ H, mask = cv2.findFundamentalMat(
+ numpy.array([kp.pt for kp in mnkp]),
+ numpy.array([kp.pt for kp in mhkp]),
+ method=cv2.RANSAC,
+ param1=10.0,
+ param2=0.9,
+ )
else:
- raise ValueError("Unsupported projection method - use 0 for homography and "
- "1 for fundamentlal matrix")
+ raise ValueError(
+ "Unsupported projection method - use 0 for homography and "
+                "1 for fundamental matrix"
+ )
# measure total used features for the projected focus point
if H is None or mask is None:
@@ -1643,6 +2058,8 @@ def _project_locations(self, locations_in_needle: list[tuple[int, int]], mnkp: l
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -1656,7 +2073,9 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
stages = ["detect", "match", "project", ""]
for i, stage in enumerate(stages):
@@ -1665,22 +2084,35 @@ def log(self, lvl: int) -> None:
if self.imglog.logging_level > 20 and stage == "project":
continue
if stage == "":
- name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step,
- self.imglog.similarities[-1])
+ name = "imglog%s-3hotmap-%s.png" % (
+ self.imglog.printable_step,
+ self.imglog.similarities[-1],
+ )
else:
- name = "imglog%s-3hotmap-%s%s.png" % (self.imglog.printable_step,
- i+1, stage)
+ name = "imglog%s-3hotmap-%s%s.png" % (
+ self.imglog.printable_step,
+ i + 1,
+ stage,
+ )
self.imglog.dump_hotmap(name, self.imglog.hotmaps[i])
self.imglog.clear()
ImageLogger.step += 1
- def _log_features(self, lvl: int, locations: list[tuple[float, float]], hotmap: "Matlike",
- radius: int = 0, r: int = 255, g: int = 255,
- b: int = 255) -> None:
+ def _log_features(
+ self,
+ lvl: int,
+ locations: list[tuple[float, float]],
+ hotmap: "Matlike",
+ radius: int = 0,
+ r: int = 255,
+ g: int = 255,
+ b: int = 255,
+ ) -> None:
if lvl < self.imglog.logging_level:
return
import cv2
+
for loc in locations:
x, y = loc
cv2.circle(hotmap, (int(x), int(y)), radius, (r, g, b))
@@ -1700,8 +2132,12 @@ class CascadeFinder(Finder):
due to the cascade classifier API.
"""
- def __init__(self, classifier_datapath: str = ".", configure: bool = True,
- synchronize: bool = True) -> None:
+ def __init__(
+ self,
+ classifier_datapath: str = ".",
+ configure: bool = True,
+ synchronize: bool = True,
+ ) -> None:
"""Build a CV backend using OpenCV's cascade matching options."""
super(CascadeFinder, self).__init__(configure=False, synchronize=False)
@@ -1709,15 +2145,13 @@ def __init__(self, classifier_datapath: str = ".", configure: bool = True,
if configure:
self.__configure_backend(reset=True)
- def __configure_backend(self, backend: str = None, category: str = "cascade",
- reset: bool = False) -> None:
- """
- Custom implementation of the base method.
-
- See base method for details.
- """
+ def __configure_backend(
+ self, backend: str = None, category: str = "cascade", reset: bool = False
+ ) -> None:
if category != "cascade":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(CascadeFinder, self).configure_backend("cascade", reset=True)
@@ -1730,9 +2164,12 @@ def __configure_backend(self, backend: str = None, category: str = "cascade",
self.params[category]["minHeight"] = CVParameter(0, 0, None, 100.0)
self.params[category]["maxHeight"] = CVParameter(1000, 0, None, 100.0)
- def configure_backend(self, backend: str = None, category: str = "cascade",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "cascade", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
@@ -1741,6 +2178,8 @@ def configure_backend(self, backend: str = None, category: str = "cascade",
def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
:param needle: target pattern (cascade) to search for
@@ -1755,25 +2194,35 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
import cv2
import numpy
+
needle_cascade = cv2.CascadeClassifier(needle.data_file)
if needle_cascade.empty():
raise Exception("Could not load the cascade classifier properly")
- gray_haystack = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY)
+ gray_haystack = cv2.cvtColor(
+ numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY
+ )
canvas = numpy.array(haystack.pil_image)
from .match import Match
+
matches = []
- rects = needle_cascade.detectMultiScale(gray_haystack,
- self.params["cascade"]["scaleFactor"].value,
- self.params["cascade"]["minNeighbors"].value,
- 0,
- (self.params["cascade"]["minWidth"].value,
- self.params["cascade"]["minHeight"].value),
- (self.params["cascade"]["maxWidth"].value,
- self.params["cascade"]["maxHeight"].value))
- for (x, y, w, h) in rects:
- cv2.rectangle(canvas, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(canvas, (x, y), (x+w, y+h), (255, 0, 0), 1)
+ rects = needle_cascade.detectMultiScale(
+ gray_haystack,
+ self.params["cascade"]["scaleFactor"].value,
+ self.params["cascade"]["minNeighbors"].value,
+ 0,
+ (
+ self.params["cascade"]["minWidth"].value,
+ self.params["cascade"]["minHeight"].value,
+ ),
+ (
+ self.params["cascade"]["maxWidth"].value,
+ self.params["cascade"]["maxHeight"].value,
+ ),
+ )
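+        # the positional arguments above are scaleFactor, minNeighbors, the legacy
+        # flags value (unused here), and the minimum/maximum detection window sizes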
+ for x, y, w, h in rects:
+ cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 0, 0), 1)
dx, dy = needle.center_offset.x, needle.center_offset.y
matches.append(Match(x, y, w, h, dx, dy))
@@ -1809,10 +2258,26 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
self.categories["threshold2"] = "threshold_filters2"
self.categories["threshold3"] = "threshold_filters3"
self.algorithms["text_matchers"] = ("mixed",)
- self.algorithms["text_detectors"] = ("pytesseract", "east", "erstat", "contours", "components")
- self.algorithms["text_recognizers"] = ("pytesseract", "tesserocr", "tesseract", "hmm", "beamSearch")
- self.algorithms["threshold_filters2"] = tuple(self.algorithms["threshold_filters"])
- self.algorithms["threshold_filters3"] = tuple(self.algorithms["threshold_filters"])
+ self.algorithms["text_detectors"] = (
+ "pytesseract",
+ "east",
+ "erstat",
+ "contours",
+ "components",
+ )
+ self.algorithms["text_recognizers"] = (
+ "pytesseract",
+ "tesserocr",
+ "tesseract",
+ "hmm",
+ "beamSearch",
+ )
+ self.algorithms["threshold_filters2"] = tuple(
+ self.algorithms["threshold_filters"]
+ )
+ self.algorithms["threshold_filters3"] = tuple(
+ self.algorithms["threshold_filters"]
+ )
# other attributes
self.erc1 = None
@@ -1827,15 +2292,21 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
self.__synchronize(reset=False)
- def __configure_backend(self, backend: str = None, category: str = "text",
- reset: bool = False) -> None:
- """
- Custom implementation of the base method.
-
- See base method for details.
- """
- if category not in ["text", "tdetect", "ocr", "contour", "threshold", "threshold2", "threshold3"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ def __configure_backend(
+ self, backend: str = None, category: str = "text", reset: bool = False
+ ) -> None:
+ if category not in [
+ "text",
+ "tdetect",
+ "ocr",
+ "contour",
+ "threshold",
+ "threshold2",
+ "threshold3",
+ ]:
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
elif category in ["contour", "threshold"]:
ContourFinder.configure_backend(self, backend, category, reset)
return
@@ -1859,8 +2330,10 @@ def __configure_backend(self, backend: str = None, category: str = "text",
elif category == "ocr" and backend is None:
backend = GlobalConfig.text_ocr_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
log.log(9, "Setting backend for %s to %s", category, backend)
self.params[category] = {}
@@ -1872,8 +2345,9 @@ def __configure_backend(self, backend: str = None, category: str = "text",
if backend == "pytesseract":
# eng, deu, etc. (ISO 639-3)
self.params[category]["language"] = CVParameter("eng")
- self.params[category]["char_whitelist"] = CVParameter(" 0123456789abcdefghijklmnopqrst"
- "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ self.params[category]["char_whitelist"] = CVParameter(
+ " 0123456789abcdefghijklmnopqrst" "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ )
# 0 original tesseract only, 1 neural nets LSTM only, 2 both, 3 anything available
self.params[category]["oem"] = CVParameter(3, 0, 3, enumerated=True)
# 13 different page segmentation modes - see Tesseract API
@@ -1881,34 +2355,62 @@ def __configure_backend(self, backend: str = None, category: str = "text",
self.params[category]["extra_configs"] = CVParameter("")
self.params[category]["binarize_detection"] = CVParameter(False)
self.params[category]["segment_line_max"] = CVParameter(1, 1, None, 1.0)
- self.params[category]["recursion_height"] = CVParameter(0.3, 0.0, 1.0, 0.01)
- self.params[category]["recursion_width"] = CVParameter(0.3, 0.0, 1.0, 0.01)
+ self.params[category]["recursion_height"] = CVParameter(
+ 0.3, 0.0, 1.0, 0.01
+ )
+ self.params[category]["recursion_width"] = CVParameter(
+ 0.3, 0.0, 1.0, 0.01
+ )
elif backend == "east":
# network input dimensions - must be divisible by 32, however currently only
# 320x320 doesn't error out from the OpenCV implementation
self.params[category]["input_res_x"] = CVParameter(320, 32, None, 32.0)
self.params[category]["input_res_y"] = CVParameter(320, 32, None, 32.0)
- self.params[category]["min_box_confidence"] = CVParameter(0.8, 0.0, 1.0, 0.1)
+ self.params[category]["min_box_confidence"] = CVParameter(
+ 0.8, 0.0, 1.0, 0.1
+ )
elif backend == "erstat":
self.params[category]["thresholdDelta"] = CVParameter(1, 1, 255, 50.0)
- self.params[category]["minArea"] = CVParameter(0.00025, 0.0, 1.0, 0.25, 0.001)
- self.params[category]["maxArea"] = CVParameter(0.13, 0.0, 1.0, 0.25, 0.001)
- self.params[category]["minProbability"] = CVParameter(0.4, 0.0, 1.0, 0.25, 0.01)
+ self.params[category]["minArea"] = CVParameter(
+ 0.00025, 0.0, 1.0, 0.25, 0.001
+ )
+ self.params[category]["maxArea"] = CVParameter(
+ 0.13, 0.0, 1.0, 0.25, 0.001
+ )
+ self.params[category]["minProbability"] = CVParameter(
+ 0.4, 0.0, 1.0, 0.25, 0.01
+ )
self.params[category]["nonMaxSuppression"] = CVParameter(True)
- self.params[category]["minProbabilityDiff"] = CVParameter(0.1, 0.0, 1.0, 0.25, 0.01)
- self.params[category]["minProbability2"] = CVParameter(0.3, 0.0, 1.0, 0.25, 0.01)
+ self.params[category]["minProbabilityDiff"] = CVParameter(
+ 0.1, 0.0, 1.0, 0.25, 0.01
+ )
+ self.params[category]["minProbability2"] = CVParameter(
+ 0.3, 0.0, 1.0, 0.25, 0.01
+ )
elif backend == "contours":
- self.params[category]["maxArea"] = CVParameter(10000, 0, None, 1000.0, 10.0)
+ self.params[category]["maxArea"] = CVParameter(
+ 10000, 0, None, 1000.0, 10.0
+ )
self.params[category]["minWidth"] = CVParameter(1, 0, None, 100.0)
self.params[category]["maxWidth"] = CVParameter(100, 0, None, 100.0)
self.params[category]["minHeight"] = CVParameter(1, 0, None, 100.0)
self.params[category]["maxHeight"] = CVParameter(100, 0, None, 100.0)
- self.params[category]["minAspectRatio"] = CVParameter(0.1, 0.0, None, 10.0)
- self.params[category]["maxAspectRatio"] = CVParameter(2.5, 0.0, None, 10.0)
- self.params[category]["horizontalSpacing"] = CVParameter(10, 0, None, 10.0)
- self.params[category]["verticalVariance"] = CVParameter(10, 0, None, 10.0)
+ self.params[category]["minAspectRatio"] = CVParameter(
+ 0.1, 0.0, None, 10.0
+ )
+ self.params[category]["maxAspectRatio"] = CVParameter(
+ 2.5, 0.0, None, 10.0
+ )
+ self.params[category]["horizontalSpacing"] = CVParameter(
+ 10, 0, None, 10.0
+ )
+ self.params[category]["verticalVariance"] = CVParameter(
+ 10, 0, None, 10.0
+ )
# 0 horizontal, 1 vertical
- self.params[category]["orientation"] = CVParameter(0, 0, 1, enumerated=True)
+ self.params[category]["orientation"] = CVParameter(
+ 0, 0, 1, enumerated=True
+ )
self.params[category]["minChars"] = CVParameter(3, 0, None, 2.0)
elif backend == "components":
# with equal delta and tolerance we ensure that only one failure will be
@@ -1918,8 +2420,9 @@ def __configure_backend(self, backend: str = None, category: str = "text",
if backend in ["tesseract", "tesserocr", "pytesseract"]:
# eng, deu, etc. (ISO 639-3)
self.params[category]["language"] = CVParameter("eng")
- self.params[category]["char_whitelist"] = CVParameter(" 0123456789abcdefghijklmnopqrst"
- "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ self.params[category]["char_whitelist"] = CVParameter(
+ " 0123456789abcdefghijklmnopqrst" "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ )
# 0 original tesseract only, 1 neural nets LSTM only, 2 both, 3 anything available
self.params[category]["oem"] = CVParameter(3, 0, 3, enumerated=True)
# 13 different page segmentation modes - see Tesseract API
@@ -1927,20 +2430,30 @@ def __configure_backend(self, backend: str = None, category: str = "text",
if backend == "pytesseract":
self.params[category]["extra_configs"] = CVParameter("")
# TODO: there could be a decent way to change component modes
- self.params[category]["component_level"] = CVParameter(1, 1, 1, enumerated=True)
+ self.params[category]["component_level"] = CVParameter(
+ 1, 1, 1, enumerated=True
+ )
elif backend == "tesserocr":
# TODO: there could be a decent way to change component modes
- self.params[category]["component_level"] = CVParameter(1, 1, 1, enumerated=True)
+ self.params[category]["component_level"] = CVParameter(
+ 1, 1, 1, enumerated=True
+ )
else:
# 0 OCR_LEVEL_WORD, 1 OCR_LEVEL_TEXT_LINE
- self.params[category]["component_level"] = CVParameter(1, 0, 1, enumerated=True)
+ self.params[category]["component_level"] = CVParameter(
+ 1, 0, 1, enumerated=True
+ )
# perform custom image thresholding if set to true or leave it to the OCR
self.params[category]["binarize_text"] = CVParameter(False)
elif backend == "hmm":
# 1 NM 2 CNN as classifiers for hidden markov models (see OpenCV documentation)
- self.params[category]["classifier"] = CVParameter(1, 1, 2, enumerated=True)
+ self.params[category]["classifier"] = CVParameter(
+ 1, 1, 2, enumerated=True
+ )
# 0 OCR_LEVEL_WORD
- self.params[category]["component_level"] = CVParameter(0, 0, 1, enumerated=True)
+ self.params[category]["component_level"] = CVParameter(
+ 0, 0, 1, enumerated=True
+ )
# perform custom image thresholding if set to true or leave it to the OCR
self.params[category]["binarize_text"] = CVParameter(True)
else:
@@ -1952,30 +2465,49 @@ def __configure_backend(self, backend: str = None, category: str = "text",
# border size to wrap around text field to improve recognition rate
self.params[category]["border_size"] = CVParameter(10, 0, 100, 25.0)
# 0 erode, 1 dilate, 2 both, 3 none
- self.params[category]["erode_dilate"] = CVParameter(3, 0, 3, enumerated=True)
+ self.params[category]["erode_dilate"] = CVParameter(
+ 3, 0, 3, enumerated=True
+ )
# 0 MORPH_RECT, 1 MORPH_ELLIPSE, 2 MORPH_CROSS
- self.params[category]["ed_kernel_type"] = CVParameter(0, 0, 2, enumerated=True)
- self.params[category]["ed_kernel_width"] = CVParameter(1, 1, 1000, 250.0, 2.0)
- self.params[category]["ed_kernel_height"] = CVParameter(1, 1, 1000, 250.0, 2.0)
+ self.params[category]["ed_kernel_type"] = CVParameter(
+ 0, 0, 2, enumerated=True
+ )
+ self.params[category]["ed_kernel_width"] = CVParameter(
+ 1, 1, 1000, 250.0, 2.0
+ )
+ self.params[category]["ed_kernel_height"] = CVParameter(
+ 1, 1, 1000, 250.0, 2.0
+ )
            # perform a distance transform if set to true or skip it otherwise
self.params[category]["distance_transform"] = CVParameter(False)
# 1 CV_DIST_L1, 2 CV_DIST_L2, 3 CV_DIST_C
- self.params[category]["dt_distance_type"] = CVParameter(1, 1, 3, enumerated=True)
+ self.params[category]["dt_distance_type"] = CVParameter(
+ 1, 1, 3, enumerated=True
+ )
            # 0 (precise) or 3x3 or 5x5 (the last only works with Euclidean distance CV_DIST_L2)
self.params[category]["dt_mask_size"] = CVParameter(3, 0, 5, 8.0, 2.0)
- def configure_backend(self, backend: str = None, category: str = "text",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "text", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __configure(self, text_detector: str = None, text_recognizer: str = None,
- threshold_filter: str = None, threshold_filter2: str = None,
- threshold_filter3: str = None, reset: bool = True) -> None:
+ def __configure(
+ self,
+ text_detector: str = None,
+ text_recognizer: str = None,
+ threshold_filter: str = None,
+ threshold_filter2: str = None,
+ threshold_filter3: str = None,
+ reset: bool = True,
+ ) -> None:
self.__configure_backend(category="text", reset=reset)
self.__configure_backend(text_detector, "tdetect")
self.__configure_backend(text_recognizer, "ocr")
@@ -1984,11 +2516,19 @@ def __configure(self, text_detector: str = None, text_recognizer: str = None,
self.__configure_backend(threshold_filter2, "threshold2")
self.__configure_backend(threshold_filter3, "threshold3")
- def configure(self, text_detector: str = None, text_recognizer: str = None,
- threshold_filter: str = None, threshold_filter2: str = None,
- threshold_filter3: str = None, reset: bool = True,
- **kwargs: dict[str, type]) -> None:
+ def configure(
+ self,
+ text_detector: str = None,
+ text_recognizer: str = None,
+ threshold_filter: str = None,
+ threshold_filter2: str = None,
+ threshold_filter3: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
"""
+ Generate configuration dictionary for all backends.
+
Custom implementation of the base method.
:param text_detector: name of a preselected backend
@@ -1998,21 +2538,40 @@ def configure(self, text_detector: str = None, text_recognizer: str = None,
:param threshold_filter3: additional threshold filter for distance transformation
:param reset: whether to (re)set all parent configurations as well
"""
- self.__configure(text_detector, text_recognizer,
- threshold_filter, threshold_filter2, threshold_filter3,
- reset)
-
- def __synchronize_backend(self, backend: str = None, category: str = "text",
- reset: bool = False) -> None:
- if category not in ["text", "tdetect", "ocr", "contour", "threshold", "threshold2", "threshold3"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ self.__configure(
+ text_detector,
+ text_recognizer,
+ threshold_filter,
+ threshold_filter2,
+ threshold_filter3,
+ reset,
+ )
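+ # A minimal usage sketch, assuming the enclosing TextFinder class and two of
+ # the backend names handled elsewhere in this module ("east" for detection,
+ # "pytesseract" for recognition):
+ #
+ #   finder = TextFinder()
+ #   finder.configure(text_detector="east", text_recognizer="pytesseract")
+ #   finder.synchronize(text_detector="east", text_recognizer="pytesseract")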
+
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "text", reset: bool = False
+ ) -> None:
+ if category not in [
+ "text",
+ "tdetect",
+ "ocr",
+ "contour",
+ "threshold",
+ "threshold2",
+ "threshold3",
+ ]:
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
Finder.synchronize_backend(self, "text", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
backend = self.params[category]["backend"]
import cv2
+
datapath = self.params["text"]["datapath"].value
tessdata_path = os.path.join(datapath, "tessdata")
if not os.path.exists(tessdata_path):
@@ -2026,28 +2585,47 @@ def __synchronize_backend(self, backend: str = None, category: str = "text",
elif category == "tdetect" and backend == "pytesseract":
import pytesseract
+
self.tbox = pytesseract
- tessdata_dir = "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else ""
+ tessdata_dir = (
+ "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else ""
+ )
self.tbox_config = r"%s --oem %s --psm %s "
- self.tbox_config %= (tessdata_dir,
- self.params["tdetect"]["oem"].value,
- self.params["tdetect"]["psmode"].value)
- self.tbox_config += r"-c tessedit_char_whitelist='%s' %s batch.nochop wordstrbox"
- self.tbox_config %= (self.params["tdetect"]["char_whitelist"].value,
- self.params["tdetect"]["extra_configs"].value)
+ self.tbox_config %= (
+ tessdata_dir,
+ self.params["tdetect"]["oem"].value,
+ self.params["tdetect"]["psmode"].value,
+ )
+ self.tbox_config += (
+ r"-c tessedit_char_whitelist='%s' %s batch.nochop wordstrbox"
+ )
+ self.tbox_config %= (
+ self.params["tdetect"]["char_whitelist"].value,
+ self.params["tdetect"]["extra_configs"].value,
+ )
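+ # With illustrative parameter values (oem=3, psm=11, some character
+ # whitelist, no extra configs), the assembled detection command line would
+ # look roughly like:
+ #   --tessdata-dir '<datapath>/tessdata' --oem 3 --psm 11 \
+ #     -c tessedit_char_whitelist='...' batch.nochop wordstrbox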
elif category == "tdetect" and backend == "east":
- self.east_net = cv2.dnn.readNet(os.path.join(datapath, 'frozen_east_text_detection.pb'))
+ self.east_net = cv2.dnn.readNet(
+ os.path.join(datapath, "frozen_east_text_detection.pb")
+ )
elif category == "tdetect" and backend == "erstat":
- self.erc1 = cv2.text.loadClassifierNM1(os.path.join(datapath, 'trained_classifierNM1.xml'))
- self.erf1 = cv2.text.createERFilterNM1(self.erc1,
- self.params["tdetect"]["thresholdDelta"].value,
- self.params["tdetect"]["minArea"].value,
- self.params["tdetect"]["maxArea"].value,
- self.params["tdetect"]["minProbability"].value,
- self.params["tdetect"]["nonMaxSuppression"].value,
- self.params["tdetect"]["minProbabilityDiff"].value)
- self.erc2 = cv2.text.loadClassifierNM2(os.path.join(datapath, 'trained_classifierNM2.xml'))
- self.erf2 = cv2.text.createERFilterNM2(self.erc2, self.params["tdetect"]["minProbability2"].value)
+ self.erc1 = cv2.text.loadClassifierNM1(
+ os.path.join(datapath, "trained_classifierNM1.xml")
+ )
+ self.erf1 = cv2.text.createERFilterNM1(
+ self.erc1,
+ self.params["tdetect"]["thresholdDelta"].value,
+ self.params["tdetect"]["minArea"].value,
+ self.params["tdetect"]["maxArea"].value,
+ self.params["tdetect"]["minProbability"].value,
+ self.params["tdetect"]["nonMaxSuppression"].value,
+ self.params["tdetect"]["minProbabilityDiff"].value,
+ )
+ self.erc2 = cv2.text.loadClassifierNM2(
+ os.path.join(datapath, "trained_classifierNM2.xml")
+ )
+ self.erf2 = cv2.text.createERFilterNM2(
+ self.erc2, self.params["tdetect"]["minProbability2"].value
+ )
elif category == "tdetect":
# nothing to sync
return
@@ -2055,30 +2633,45 @@ def __synchronize_backend(self, backend: str = None, category: str = "text",
elif category == "ocr":
if backend == "pytesseract":
import pytesseract
+
self.ocr = pytesseract
- tessdata_dir = "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else ""
+ tessdata_dir = (
+ "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else ""
+ )
self.ocr_config = r"%s --oem %s --psm %s "
- self.ocr_config %= (tessdata_dir,
- self.params["ocr"]["oem"].value,
- self.params["ocr"]["psmode"].value)
+ self.ocr_config %= (
+ tessdata_dir,
+ self.params["ocr"]["oem"].value,
+ self.params["ocr"]["psmode"].value,
+ )
self.ocr_config += r"-c tessedit_char_whitelist='%s' %s"
- self.ocr_config %= (self.params["ocr"]["char_whitelist"].value,
- self.params["ocr"]["extra_configs"].value)
+ self.ocr_config %= (
+ self.params["ocr"]["char_whitelist"].value,
+ self.params["ocr"]["extra_configs"].value,
+ )
elif backend == "tesserocr":
from tesserocr import PyTessBaseAPI
- kwargs = {"lang": self.params["ocr"]["language"].value,
- "oem": self.params["ocr"]["oem"].value,
- "psm": self.params["ocr"]["psmode"].value}
+
+ kwargs = {
+ "lang": self.params["ocr"]["language"].value,
+ "oem": self.params["ocr"]["oem"].value,
+ "psm": self.params["ocr"]["psmode"].value,
+ }
if tessdata_path:
self.ocr = PyTessBaseAPI(path=tessdata_path, **kwargs)
else:
self.ocr = PyTessBaseAPI(**kwargs)
- self.ocr.SetVariable("tessedit_char_whitelist", self.params["ocr"]["char_whitelist"].value)
+ self.ocr.SetVariable(
+ "tessedit_char_whitelist",
+ self.params["ocr"]["char_whitelist"].value,
+ )
elif backend == "tesseract":
- kwargs = {"language": self.params["ocr"]["language"].value,
- "char_whitelist": self.params["ocr"]["char_whitelist"].value,
- "oem": self.params["ocr"]["oem"].value,
- "psmode": self.params["ocr"]["psmode"].value}
+ kwargs = {
+ "language": self.params["ocr"]["language"].value,
+ "char_whitelist": self.params["ocr"]["char_whitelist"].value,
+ "oem": self.params["ocr"]["oem"].value,
+ "psmode": self.params["ocr"]["psmode"].value,
+ }
if tessdata_path:
self.ocr = cv2.text.OCRTesseract_create(datapath, **kwargs)
else:
@@ -2086,44 +2679,73 @@ def __synchronize_backend(self, backend: str = None, category: str = "text",
elif backend in ["hmm", "beamSearch"]:
import numpy
+
# vocabulary is strictly tied to the XML data so it remains hardcoded here
- vocabulary = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
- with open(os.path.join(datapath, 'OCRHMM_transitions_table.xml')) as f:
+ vocabulary = (
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+ )
+ with open(os.path.join(datapath, "OCRHMM_transitions_table.xml")) as f:
transition_p_xml = f.read()
- transition_p_data = re.search("<data>(.*)</data>",
- transition_p_xml.replace("\n", " "))
- assert transition_p_data is not None, "Corrupted transition probability data"
- transition_p = numpy.fromstring(transition_p_data.group(1).strip(), sep=' ').reshape(62, 62)
+ transition_p_data = re.search(
+ "<data>(.*)</data>", transition_p_xml.replace("\n", " ")
+ )
+ assert (
+ transition_p_data is not None
+ ), "Corrupted transition probability data"
+ transition_p = numpy.fromstring(
+ transition_p_data.group(1).strip(), sep=" "
+ ).reshape(62, 62)
emission_p = numpy.eye(62, dtype=numpy.float64)
if backend == "hmm":
- classifier_data = os.path.join(datapath, 'OCRHMM_knn_model_data.xml.gz')
+ classifier_data = os.path.join(
+ datapath, "OCRHMM_knn_model_data.xml.gz"
+ )
if self.params["ocr"]["classifier"].value == 1:
classifier = cv2.text.loadOCRHMMClassifierNM(classifier_data)
elif self.params["ocr"]["classifier"].value == 2:
classifier = cv2.text.loadOCRHMMClassifierCNN(classifier_data)
else:
- raise ValueError("Invalid classifier selected for OCR - must be NM or CNN")
- self.ocr = cv2.text.OCRHMMDecoder_create(classifier, vocabulary, transition_p, emission_p)
+ raise ValueError(
+ "Invalid classifier selected for OCR - must be NM or CNN"
+ )
+ self.ocr = cv2.text.OCRHMMDecoder_create(
+ classifier, vocabulary, transition_p, emission_p
+ )
else:
- classifier_data = os.path.join(datapath, 'OCRBeamSearch_CNN_model_data.xml.gz')
- classifier = cv2.text.loadOCRBeamSearchClassifierCNN(classifier_data)
- self.ocr = cv2.text.OCRBeamSearchDecoder_create(classifier, vocabulary, transition_p, emission_p)
+ classifier_data = os.path.join(
+ datapath, "OCRBeamSearch_CNN_model_data.xml.gz"
+ )
+ classifier = cv2.text.loadOCRBeamSearchClassifierCNN(
+ classifier_data
+ )
+ self.ocr = cv2.text.OCRBeamSearchDecoder_create(
+ classifier, vocabulary, transition_p, emission_p
+ )
else:
raise ValueError("Invalid OCR backend '%s'" % backend)
- def synchronize_backend(self, backend: str = None, category: str = "text",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "text", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
"""
self.__synchronize_backend(backend, category, reset)
- def __synchronize(self, text_detector: str = None, text_recognizer: str = None,
- threshold_filter: str = None, threshold_filter2: str = None,
- threshold_filter3: str = None, reset: bool = True) -> None:
+ def __synchronize(
+ self,
+ text_detector: str = None,
+ text_recognizer: str = None,
+ threshold_filter: str = None,
+ threshold_filter2: str = None,
+ threshold_filter3: str = None,
+ reset: bool = True,
+ ) -> None:
self.__synchronize_backend(category="text", reset=reset)
self.__synchronize_backend(text_detector, "tdetect")
self.__synchronize_backend(text_recognizer, "ocr")
@@ -2132,10 +2754,18 @@ def __synchronize(self, text_detector: str = None, text_recognizer: str = None,
self.__synchronize_backend(threshold_filter2, "threshold2")
self.__synchronize_backend(threshold_filter3, "threshold3")
- def synchronize(self, text_detector: str = None, text_recognizer: str = None,
- threshold_filter: str = None, threshold_filter2: str = None,
- threshold_filter3: str = None, reset: bool = True) -> None:
+ def synchronize(
+ self,
+ text_detector: str = None,
+ text_recognizer: str = None,
+ threshold_filter: str = None,
+ threshold_filter2: str = None,
+ threshold_filter3: str = None,
+ reset: bool = True,
+ ) -> None:
"""
+ Synchronize all backends with the current configuration dictionary.
+
Custom implementation of the base method.
:param text_detector: name of a preselected backend
@@ -2145,12 +2775,19 @@ def synchronize(self, text_detector: str = None, text_recognizer: str = None,
:param threshold_filter3: additional threshold filter for distance transformation
:param reset: whether to (re)set all parent configurations as well
"""
- self.__synchronize(text_detector, text_recognizer,
- threshold_filter, threshold_filter2, threshold_filter3,
- reset)
+ self.__synchronize(
+ text_detector,
+ text_recognizer,
+ threshold_filter,
+ threshold_filter2,
+ threshold_filter3,
+ reset,
+ )
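+ # Expected layout of the configured data path, assuming the file names used
+ # by the individual backends above (only files for the selected backends
+ # need to be present):
+ #   <datapath>/tessdata/                      tesseract language data
+ #   <datapath>/frozen_east_text_detection.pb  EAST detector model
+ #   <datapath>/trained_classifierNM1.xml      erstat NM1 classifier
+ #   <datapath>/OCRHMM_transitions_table.xml   HMM/beamSearch transition table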
def find(self, needle: "Text", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
:param needle: target text to search for
@@ -2165,6 +2802,7 @@ def find(self, needle: "Text", haystack: "Image") -> "list[Match]":
import cv2
import numpy
+
text_needle = needle.value
img_haystack = numpy.array(haystack.pil_image)
final_hotmap = numpy.array(haystack.pil_image)
@@ -2183,13 +2821,17 @@ def find(self, needle: "Text", haystack: "Image") -> "list[Match]":
elif backend == "components":
text_regions = self._detect_text_components(haystack)
else:
- raise UnsupportedBackendError("Unsupported text detection backend %s" % backend)
+ raise UnsupportedBackendError(
+ "Unsupported text detection backend %s" % backend
+ )
# perform optical character recognition on the final regions
backend = self.params["ocr"]["backend"]
log.debug("Recognizing text with %s", backend)
from .match import Match
+
matches = []
+
def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike":
if self.params["ocr"]["binarize_text"].value:
first_threshold = self.params["threshold"]
@@ -2201,26 +2843,41 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike":
return text_img
else:
return cv2.cvtColor(text_img, cv2.COLOR_RGB2GRAY)
+
for i, text_box in enumerate(text_regions):
# main OCR preprocessing stage
border = self.params["ocr"]["border_size"].value
- text_img = img_haystack[max(text_box[1]-border, 0):min(text_box[1]+text_box[3]+border, img_haystack.shape[0]),
- max(text_box[0]-border, 0):min(text_box[0]+text_box[2]+border, img_haystack.shape[1])]
+ text_img = img_haystack[
+ max(text_box[1] - border, 0) : min(
+ text_box[1] + text_box[3] + border, img_haystack.shape[0]
+ ),
+ max(text_box[0] - border, 0) : min(
+ text_box[0] + text_box[2] + border, img_haystack.shape[1]
+ ),
+ ]
factor = self.params["ocr"]["zoom_factor"].value
log.debug("Zooming x%i candidate for improved OCR processing", factor)
text_img = cv2.resize(text_img, None, fx=factor, fy=factor)
text_img = binarize_step("threshold2", text_img)
if self.params["ocr"]["distance_transform"].value:
- text_img = cv2.distanceTransform(text_img,
- self.params["ocr"]["dt_distance_type"].value,
- self.params["ocr"]["dt_mask_size"].value)
- text_img = cv2.cvtColor(numpy.asarray(text_img, dtype='uint8'), cv2.COLOR_GRAY2RGB)
+ text_img = cv2.distanceTransform(
+ text_img,
+ self.params["ocr"]["dt_distance_type"].value,
+ self.params["ocr"]["dt_mask_size"].value,
+ )
+ text_img = cv2.cvtColor(
+ numpy.asarray(text_img, dtype="uint8"), cv2.COLOR_GRAY2RGB
+ )
text_img = binarize_step("threshold3", text_img)
if self.params["ocr"]["erode_dilate"].value < 3:
- element = cv2.getStructuringElement(self.params["ocr"]["ed_kernel_type"].value,
- (self.params["ocr"]["ed_kernel_width"].value,
- self.params["ocr"]["ed_kernel_height"].value))
+ element = cv2.getStructuringElement(
+ self.params["ocr"]["ed_kernel_type"].value,
+ (
+ self.params["ocr"]["ed_kernel_width"].value,
+ self.params["ocr"]["ed_kernel_height"].value,
+ ),
+ )
if self.params["ocr"]["erode_dilate"].value in [0, 2]:
text_img = cv2.erode(text_img, element)
if self.params["ocr"]["erode_dilate"].value in [1, 2]:
@@ -2229,35 +2886,44 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike":
# BUG: we hit segfault when using the BeamSearch OCR backend so disallow it
if backend == "beamSearch":
- raise NotImplementedError("Current version of BeamSearch segfaults so it's not yet available")
+ raise NotImplementedError(
+ "Current version of BeamSearch segfaults so it's not yet available"
+ )
# TODO: we can do this now with pytesseract/tesserocr but have to evaluate its usefulness
- #vector<Rect> boxes;
- #vector<string> words;
- #vector<float> confidences;
- #output = ocr.run(group_img, &boxes, &words, &confidences, cv2.text.OCR_LEVEL_WORD)
+ # vector<Rect> boxes;
+ # vector<string> words;
+ # vector<float> confidences;
+ # output = ocr.run(group_img, &boxes, &words, &confidences, cv2.text.OCR_LEVEL_WORD)
# redirection of tesseract's streams can only be done on the file descriptor level
# sys.stdout = open(os.devnull, 'w')
if backend == "pytesseract":
- output = self.ocr.image_to_string(text_img,
- lang=self.params["ocr"]["language"].value,
- config=self.ocr_config)
- logging.debug("Running pytesseract with extra command line %s", self.ocr_config)
+ output = self.ocr.image_to_string(
+ text_img,
+ lang=self.params["ocr"]["language"].value,
+ config=self.ocr_config,
+ )
+ logging.debug(
+ "Running pytesseract with extra command line %s", self.ocr_config
+ )
elif backend == "tesserocr":
self.ocr.SetImage(PIL.Image.fromarray(text_img))
output = self.ocr.GetUTF8Text()
else:
stdout_fd = sys.stdout.fileno() if hasattr(sys.stdout, "fileno") else 1
stderr_fd = sys.stderr.fileno() if hasattr(sys.stderr, "fileno") else 2
- null_fo = open(os.devnull, 'wb')
- with os.fdopen(os.dup(stdout_fd), 'wb') as cpout_fo:
- with os.fdopen(os.dup(stderr_fd), 'wb') as cperr_fo:
+ null_fo = open(os.devnull, "wb")
+ with os.fdopen(os.dup(stdout_fd), "wb") as cpout_fo:
+ with os.fdopen(os.dup(stderr_fd), "wb") as cperr_fo:
sys.stdout.flush()
sys.stderr.flush()
os.dup2(null_fo.fileno(), stdout_fd)
os.dup2(null_fo.fileno(), stderr_fd)
- output = self.ocr.run(text_img, text_img,
- self.params["ocr"]["min_confidence"].value,
- self.params["ocr"]["component_level"].value)
+ output = self.ocr.run(
+ text_img,
+ text_img,
+ self.params["ocr"]["min_confidence"].value,
+ self.params["ocr"]["component_level"].value,
+ )
sys.stdout.flush()
sys.stderr.flush()
os.dup2(cpout_fo.fileno(), stdout_fd)
@@ -2266,9 +2932,11 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike":
if self.params["ocr"]["component_level"].value == 1:
# strip off the newline character which is never useful
output = output.rstrip()
- log.debug("OCR output %s = '%s'", i+1, output)
+ log.debug("OCR output %s = '%s'", i + 1, output)
- similarity = 1.0 - float(needle.distance_to(output)) / max(len(output), len(text_needle))
+ similarity = 1.0 - float(needle.distance_to(output)) / max(
+ len(output), len(text_needle)
+ )
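+ # Worked example of the normalization above, assuming distance_to() returns
+ # an edit-distance-like count: needle "Accept" against OCR output "Acceqt"
+ # gives distance 1 over max length 6, i.e. a similarity of roughly 0.83.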
log.debug("Similarity = '%s'", similarity)
self.imglog.similarities.append(similarity)
if similarity >= self.params["find"]["similarity"].value:
@@ -2276,8 +2944,8 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike":
self.imglog.locations.append((text_box[0], text_box[1]))
x, y, w, h = text_box
dx, dy = needle.center_offset.x, needle.center_offset.y
- cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (255, 255, 255), 1)
+ cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (255, 255, 255), 1)
matches.append(Match(x, y, w, h, dx, dy, similarity))
matches = sorted(matches, key=lambda x: x.similarity, reverse=True)
@@ -2297,16 +2965,24 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]:
max_segment = self.params["tdetect"]["segment_line_max"].value
for i in range(1, max_segment):
hline = cv2.getStructuringElement(cv2.MORPH_RECT, (max_segment, i))
- hlopened = cv2.morphologyEx(detection_img, cv2.MORPH_OPEN, hline, iterations=1)
+ hlopened = cv2.morphologyEx(
+ detection_img, cv2.MORPH_OPEN, hline, iterations=1
+ )
vline = cv2.getStructuringElement(cv2.MORPH_RECT, (i, max_segment))
- vlopened = cv2.morphologyEx(detection_img, cv2.MORPH_OPEN, vline, iterations=1)
+ vlopened = cv2.morphologyEx(
+ detection_img, cv2.MORPH_OPEN, vline, iterations=1
+ )
detection_img -= hlopened
detection_img -= vlopened
else:
detection_img = cv2.cvtColor(detection_img, cv2.COLOR_RGB2GRAY)
- detection_width = int(self.params["tdetect"]["recursion_width"].value * haystack.width)
- detection_height = int(self.params["tdetect"]["recursion_height"].value * haystack.height)
+ detection_width = int(
+ self.params["tdetect"]["recursion_width"].value * haystack.width
+ )
+ detection_height = int(
+ self.params["tdetect"]["recursion_height"].value * haystack.height
+ )
char_canvas = detection_img
text_canvas = numpy.array(haystack.pil_image)
@@ -2320,12 +2996,15 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]:
region_w, region_h = next_region.shape[1], next_region.shape[0]
# TODO: activate flag for word-only matching if there is enough interest for this
- #output = self.tbox.image_to_boxes(next_region, self.params["tdetect"]["language"].value,
+ # output = self.tbox.image_to_boxes(next_region, self.params["tdetect"]["language"].value,
# config=self.tbox_config, output_type=self.tbox.Output.DICT)
# ...process dict
- output = self.tbox.run_and_get_output(next_region, 'box',
- self.params["tdetect"]["language"].value,
- config=self.tbox_config)
+ output = self.tbox.run_and_get_output(
+ next_region,
+ "box",
+ self.params["tdetect"]["language"].value,
+ config=self.tbox_config,
+ )
for line in output.splitlines():
tokens = line.rstrip().split(" ", maxsplit=6)
if tokens[0] != "WordStr":
@@ -2342,25 +3021,34 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]:
logging.debug("Empty text found, skipping region")
continue
if (w > detection_width and h > 0) or (h > detection_height and w > 0):
- subregion_npy = next_region[max(dy, 0):min(dy+h, region_h),
- max(dx, 0):min(dx+w, region_w)]
+ subregion_npy = next_region[
+ max(dy, 0) : min(dy + h, region_h),
+ max(dx, 0) : min(dx + w, region_w),
+ ]
if next_region.shape != subregion_npy.shape:
- logging.debug("Large region of size %sx%s detected, rescanning inside of it", w, h)
+ logging.debug(
+ "Large region of size %sx%s detected, rescanning inside of it",
+ w,
+ h,
+ )
recursive_regions.append((x, y, subregion_npy))
continue
- logging.debug("Found text '%s' with tesseract-provided box %s", text, (x, y, w, h))
- cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 255, 0), 1)
+ logging.debug(
+ "Found text '%s' with tesseract-provided box %s", text, (x, y, w, h)
+ )
+ cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 255, 0), 1)
text_regions.append([x, y, w, h])
return text_regions
def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int]]:
- #:.. note:: source implementation by Adrian Rosebrock from his post:
- #: https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/
+ # :.. note:: source implementation by Adrian Rosebrock from his post:
+ # : https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/
import cv2
import numpy
+
img = numpy.array(haystack.pil_image)
char_canvas = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY)
text_canvas = numpy.array(haystack.pil_image)
@@ -2368,21 +3056,27 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int]
self.imglog.hotmaps.append(text_canvas)
# resize the image to resolution compatible with the model
- inp_width, inp_height = (self.params["tdetect"]["input_res_x"].value,
- self.params["tdetect"]["input_res_y"].value)
+ inp_width, inp_height = (
+ self.params["tdetect"]["input_res_x"].value,
+ self.params["tdetect"]["input_res_y"].value,
+ )
width_ratio = img.shape[1] / float(inp_width)
height_ratio = img.shape[0] / float(inp_height)
img = cv2.resize(img, (inp_width, inp_height))
# convert to a model-compatible input using the mean from the training
- inp = cv2.dnn.blobFromImage(img, mean=(123.68, 116.78, 103.94), swapRB=True, crop=False)
+ inp = cv2.dnn.blobFromImage(
+ img, mean=(123.68, 116.78, 103.94), swapRB=True, crop=False
+ )
self.east_net.setInput(inp)
# select two output layers for the EAST detector model, respectively for
# the output probabilities and the text bounding box coordinates
output_layers = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
probability, geometry = self.east_net.forward(output_layers)
- char_canvas[:] = cv2.resize(probability[0, 0]*255.0, (char_canvas.shape[1], char_canvas.shape[0]))
+ char_canvas[:] = cv2.resize(
+ probability[0, 0] * 255.0, (char_canvas.shape[1], char_canvas.shape[0])
+ )
rects = []
for row in range(0, probability.shape[2]):
@@ -2400,14 +3094,34 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int]
# calculate the rotation angle from the prediction output
sin, cos = numpy.sin(row_data[4][col]), numpy.cos(row_data[4][col])
# compute the starting coordinates (from the ending ones) for the text bounding box
- x2 = min(dx + cos * row_data[1][col] + sin * row_data[2][col], inp_width) * width_ratio
- y2 = min(dy - sin * row_data[1][col] + cos * row_data[2][col], inp_height) * height_ratio
+ x2 = (
+ min(dx + cos * row_data[1][col] + sin * row_data[2][col], inp_width)
+ * width_ratio
+ )
+ y2 = (
+ min(
+ dy - sin * row_data[1][col] + cos * row_data[2][col], inp_height
+ )
+ * height_ratio
+ )
# the network might give unlimited region boundaries so limit by input width/height (above)
x1, y1 = x2 - w, y2 - h
rect = (int(x1), int(y1), int(w), int(h))
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2)
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (255, 255, 255), 1)
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (255, 255, 255),
+ 1,
+ )
rects.append(rect)
# TODO: needed for outsourced non-maxima suppression
# confidences.append(row_scores[x])
@@ -2432,16 +3146,37 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int]
for r2pair in region_queue:
r2: tuple[int, int, int, int] = r2pair[0]
# if the two regions intersect
- if (r1[0] < r2[0] + r2[2] and r1[0] + r1[2] > r2[0]
- and r1[1] < r2[1] + r2[3] and r1[1] + r1[3] > r2[1]):
- r1 = [min(r1[0], r2[0]), min(r1[1], r2[1]), max(r1[2], r2[2]), max(r1[3], r2[3])]
+ if (
+ r1[0] < r2[0] + r2[2]
+ and r1[0] + r1[2] > r2[0]
+ and r1[1] < r2[1] + r2[3]
+ and r1[1] + r1[3] > r2[1]
+ ):
+ r1 = [
+ min(r1[0], r2[0]),
+ min(r1[1], r2[1]),
+ max(r1[2], r2[2]),
+ max(r1[3], r2[3]),
+ ]
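+ # the merged box keeps the top-left-most corner and the larger of the two
+ # widths/heights, i.e. an approximation of the exact union extent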
# second region will no longer be considered
r2pair[1] = False
# first region is now merged with all intersecting regions
text_regions.append(r1)
for rect in text_regions:
- cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2)
- cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1)
+ cv2.rectangle(
+ text_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ text_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 255),
+ 1,
+ )
logging.debug("A total of %s final text regions found", len(text_regions))
return text_regions
@@ -2449,6 +3184,7 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int]
def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, int]]:
import cv2
import numpy
+
img = numpy.array(haystack.pil_image)
char_canvas = numpy.array(haystack.pil_image)
text_canvas = numpy.array(haystack.pil_image)
@@ -2458,32 +3194,68 @@ def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, in
# extract channels to be processed individually - B, G, R, lightness, and gradient magnitude
channels = list(cv2.text.computeNMChannels(img))
# append negative channels to detect ER- (bright regions over dark background) skipping the gradient channel
- channel_num_without_grad = len(channels)-1
+ channel_num_without_grad = len(channels) - 1
for i in range(0, channel_num_without_grad):
- channels.append(255-channels[i])
+ channels.append(255 - channels[i])
char_regions = []
text_regions = []
# apply the default cascade classifier to each independent channel
- log.debug("Extracting class specific extremal regions from %s channels", len(channels))
+ log.debug(
+ "Extracting class specific extremal regions from %s channels", len(channels)
+ )
for i, channel in enumerate(channels):
# one liner for "erf1.run(channel)" then "erf2.run(channel)"
regions = cv2.text.detectRegions(channel, self.erf1, self.erf2)
- logging.debug("A total of %s possible character regions found on channel %s", len(regions), i)
+ logging.debug(
+ "A total of %s possible character regions found on channel %s",
+ len(regions),
+ i,
+ )
rects = [cv2.boundingRect(p.reshape(-1, 1, 2)) for p in regions]
for rect in rects:
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2)
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1)
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 255),
+ 1,
+ )
if len(regions) == 0:
continue
- region_groups = cv2.text.erGrouping(img, channel, [r.tolist() for r in regions])
- logging.debug("A total of %s possible text regions found on channel %s", len(region_groups), i)
+ region_groups = cv2.text.erGrouping(
+ img, channel, [r.tolist() for r in regions]
+ )
+ logging.debug(
+ "A total of %s possible text regions found on channel %s",
+ len(region_groups),
+ i,
+ )
for rect in region_groups:
- cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2)
- cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 255, 0), 1)
+ cv2.rectangle(
+ text_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ text_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 255, 0),
+ 1,
+ )
char_regions.extend(regions)
text_regions.extend(region_groups)
@@ -2501,18 +3273,30 @@ def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, in
for r2pair in region_queue:
r2, _ = r2pair
# if the two regions intersect
- if (r1[0] < r2[0] + r2[2] and r1[0] + r1[2] > r2[0]
- and r1[1] < r2[1] + r2[3] and r1[1] + r1[3] > r2[1]):
- r1 = [min(r1[0], r2[0]), min(r1[1], r2[1]), max(r1[2], r2[2]), max(r1[3], r2[3])]
+ if (
+ r1[0] < r2[0] + r2[2]
+ and r1[0] + r1[2] > r2[0]
+ and r1[1] < r2[1] + r2[3]
+ and r1[1] + r1[3] > r2[1]
+ ):
+ r1 = [
+ min(r1[0], r2[0]),
+ min(r1[1], r2[1]),
+ max(r1[2], r2[2]),
+ max(r1[3], r2[3]),
+ ]
# second region will no longer be considered
r2pair[1] = False
# first region is now merged with all intersecting regions
final_regions.append(r1)
return final_regions
- def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int, int]]:
+ def _detect_text_contours(
+ self, haystack: "Image"
+ ) -> list[tuple[int, int, int, int]]:
import cv2
import numpy
+
img = numpy.array(haystack.pil_image)
char_canvas = numpy.array(haystack.pil_image)
text_canvas = numpy.array(haystack.pil_image)
@@ -2526,27 +3310,39 @@ def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int,
char_regions = []
for hcontour in haystack_contours:
x, y, w, h = cv2.boundingRect(hcontour)
- area, ratio = cv2.contourArea(hcontour), float(w)/h
- if (area < self.params["contour"]["minArea"].value
+ area, ratio = cv2.contourArea(hcontour), float(w) / h
+ if (
+ area < self.params["contour"]["minArea"].value
or area > self.params["tdetect"]["maxArea"].value
or w < self.params["tdetect"]["minWidth"].value
or w > self.params["tdetect"]["maxWidth"].value
or h < self.params["tdetect"]["minHeight"].value
or h > self.params["tdetect"]["maxHeight"].value
or ratio < self.params["tdetect"]["minAspectRatio"].value
- or ratio > self.params["tdetect"]["maxAspectRatio"].value):
- log.debug("Ignoring contour with area %sx%s>%s and aspect ratio %s/%s=%s",
- w, h, area, w, h, ratio)
+ or ratio > self.params["tdetect"]["maxAspectRatio"].value
+ ):
+ log.debug(
+ "Ignoring contour with area %sx%s>%s and aspect ratio %s/%s=%s",
+ w,
+ h,
+ area,
+ w,
+ h,
+ ratio,
+ )
continue
else:
- cv2.rectangle(char_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(char_canvas, (x, y), (x+w, y+h), (0, 0, 255), 1)
+ cv2.rectangle(char_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(char_canvas, (x, y), (x + w, y + h), (0, 0, 255), 1)
char_regions.append((x, y, w, h))
char_regions = sorted(char_regions, key=lambda x: x[0])
# group characters into horizontally-correlated regions
text_regions = []
- dx, dy = self.params["tdetect"]["horizontalSpacing"].value, self.params["tdetect"]["verticalVariance"].value
+ dx, dy = (
+ self.params["tdetect"]["horizontalSpacing"].value,
+ self.params["tdetect"]["verticalVariance"].value,
+ )
text_orientation = self.params["tdetect"]["orientation"].value
min_chars_for_text = self.params["tdetect"]["minChars"].value
for i, region1 in enumerate(char_regions):
@@ -2561,28 +3357,49 @@ def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int,
x1, y1, w1, h1 = region1
x2, y2, w2, h2 = region2
if text_orientation == 0:
- is_text = x2 - (x1 + w1) < dx and x1 - (x2 + w2) < dx and abs(y1 - y2) < dy and abs(h1 - h2) < 2*dy
+ is_text = (
+ x2 - (x1 + w1) < dx
+ and x1 - (x2 + w2) < dx
+ and abs(y1 - y2) < dy
+ and abs(h1 - h2) < 2 * dy
+ )
elif text_orientation == 1:
- is_text = y2 - (y1 + h1) < dy and y1 - (y2 + h2) < dy and abs(x1 - x2) < dx and abs(w1 - w2) < 2*dx
+ is_text = (
+ y2 - (y1 + h1) < dy
+ and y1 - (y2 + h2) < dy
+ and abs(x1 - x2) < dx
+ and abs(w1 - w2) < 2 * dx
+ )
if is_text:
- region1 = (min(x1, x2), min(y1, y2), max(x1+w1, x2+w2)-min(x1, x2), max(y1+h1, y2+h2)-min(y1, y2))
+ region1 = (
+ min(x1, x2),
+ min(y1, y2),
+ max(x1 + w1, x2 + w2) - min(x1, x2),
+ max(y1 + h1, y2 + h2) - min(y1, y2),
+ )
chars_for_text += 1
char_regions[j] = None
if chars_for_text < min_chars_for_text:
- log.debug("Ignoring text contour with %s<%s characters",
- chars_for_text, min_chars_for_text)
+ log.debug(
+ "Ignoring text contour with %s<%s characters",
+ chars_for_text,
+ min_chars_for_text,
+ )
continue
x, y, w, h = region1
- cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2)
- cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 255, 0), 1)
+ cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2)
+ cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 255, 0), 1)
text_regions.append(region1)
char_regions[i] = None
return text_regions
- def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int, int]]:
+ def _detect_text_components(
+ self, haystack: "Image"
+ ) -> list[tuple[int, int, int, int]]:
import cv2
import numpy
+
img = numpy.array(haystack.pil_image)
char_canvas = numpy.array(haystack.pil_image)
text_canvas = numpy.array(haystack.pil_image)
@@ -2590,9 +3407,14 @@ def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int
self.imglog.hotmaps.append(text_canvas)
connectivity = self.params["tdetect"]["connectivity"].value
- label_num, label_img, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity, cv2.CV_32S)
- logging.debug("Detected %s component labels with centroids: %s", label_num,
- ", ".join([str((int(c[0]), int(c[1]))) for c in centroids]))
+ label_num, label_img, stats, centroids = cv2.connectedComponentsWithStats(
+ img, connectivity, cv2.CV_32S
+ )
+ logging.debug(
+ "Detected %s component labels with centroids: %s",
+ label_num,
+ ", ".join([str((int(c[0]), int(c[1]))) for c in centroids]),
+ )
self.imglog.hotmaps.append(label_img * 255)
for i in range(label_num):
x, y = stats[i, cv2.CC_STAT_LEFT], stats[i, cv2.CC_STAT_TOP]
@@ -2602,13 +3424,29 @@ def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int
continue
else:
rect = [x, y, w, h]
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2)
- cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1)
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ char_canvas,
+ (rect[0], rect[1]),
+ (rect[0] + rect[2], rect[1] + rect[3]),
+ (0, 0, 255),
+ 1,
+ )
# TODO: log here since not fully implemented
- self.imglog.hotmaps[-1] = cv2.normalize(label_img, label_img, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
+ self.imglog.hotmaps[-1] = cv2.normalize(
+ label_img, label_img, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U
+ )
self.imglog.log(30)
- raise NotImplementedError("The connected components method for text detection needs more labels")
+ raise NotImplementedError(
+ "The connected components method for text detection needs more labels"
+ )
# TODO: alternatively use cvBlobsLib
# myblobs = CBlobResult(binary_image, mask, 0, True)
@@ -2617,6 +3455,8 @@ def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -2630,21 +3470,33 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
-
- self.imglog.dump_hotmap("imglog%s-3hotmap-1char.png" % self.imglog.printable_step,
- self.imglog.hotmaps[0])
- self.imglog.dump_hotmap("imglog%s-3hotmap-2text.png" % self.imglog.printable_step,
- self.imglog.hotmaps[1])
-
- for i in range(2, len(self.imglog.hotmaps)-1):
- self.imglog.dump_hotmap("imglog%s-3hotmap-3ocr-%stext-%s.png" % (self.imglog.printable_step, i-1,
- self.imglog.similarities[i-2]),
- self.imglog.hotmaps[i])
-
- similarity = max(self.imglog.similarities) if len(self.imglog.similarities) > 0 else 0.0
- self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
- self.imglog.hotmaps[-1])
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
+
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-1char.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[0],
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-2text.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[1],
+ )
+
+ for i in range(2, len(self.imglog.hotmaps) - 1):
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-3ocr-%stext-%s.png"
+ % (self.imglog.printable_step, i - 1, self.imglog.similarities[i - 2]),
+ self.imglog.hotmaps[i],
+ )
+
+ similarity = (
+ max(self.imglog.similarities) if len(self.imglog.similarities) > 0 else 0.0
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity),
+ self.imglog.hotmaps[-1],
+ )
self.imglog.clear()
ImageLogger.step += 1
@@ -2678,10 +3530,20 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
FeatureFinder.synchronize(self, reset=False)
- def __configure_backend(self, backend: str = None, category: str = "tempfeat",
- reset: bool = False) -> None:
- if category not in ["tempfeat", "template", "feature", "fdetect", "fextract", "fmatch"]:
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ def __configure_backend(
+ self, backend: str = None, category: str = "tempfeat", reset: bool = False
+ ) -> None:
+ if category not in [
+ "tempfeat",
+ "template",
+ "feature",
+ "fdetect",
+ "fextract",
+ "fmatch",
+ ]:
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
elif category in ["feature", "fdetect", "fextract", "fmatch"]:
FeatureFinder.configure_backend(self, backend, category, reset)
return
@@ -2694,25 +3556,35 @@ def __configure_backend(self, backend: str = None, category: str = "tempfeat",
if backend is None:
backend = "mixed"
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
self.params[category] = {}
self.params[category]["backend"] = backend
self.params[category]["front_similarity"] = CVParameter(0.7, 0.0, 1.0)
- def configure_backend(self, backend: str = None, category: str = "tempfeat",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "tempfeat", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __configure(self, template_match: str = None, feature_detect: str = None,
- feature_extract: str = None, feature_match: str = None,
- reset: bool = True) -> None:
+ def __configure(
+ self,
+ template_match: str = None,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ ) -> None:
self.__configure_backend(category="tempfeat", reset=reset)
self.__configure_backend(template_match, "template")
self.__configure_backend(category="feature")
@@ -2720,32 +3592,53 @@ def __configure(self, template_match: str = None, feature_detect: str = None,
self.__configure_backend(feature_extract, "fextract")
self.__configure_backend(feature_match, "fmatch")
- def configure(self, template_match: str = None, feature_detect: str = None,
- feature_extract: str = None, feature_match: str = None,
- reset: bool = True, **kwargs: dict[str, type]) -> None:
+ def configure(
+ self,
+ template_match: str = None,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ **kwargs: dict[str, type]
+ ) -> None:
"""
- Custom implementation of the base methods.
+ Generate configuration dictionary for all backends.
+
+ Custom implementation of the base method.
See base method for details.
"""
- self.__configure(template_match, feature_detect, feature_extract, feature_match, reset)
-
- def synchronize(self, feature_detect: str = None, feature_extract: str = None,
- feature_match: str = None, reset: bool = True) -> None:
+ self.__configure(
+ template_match, feature_detect, feature_extract, feature_match, reset
+ )
+
+ def synchronize(
+ self,
+ feature_detect: str = None,
+ feature_extract: str = None,
+ feature_match: str = None,
+ reset: bool = True,
+ ) -> None:
"""
+ Synchronize all backends with the current configuration dictionary.
+
Custom implementation of the base method.
See base method for details.
"""
Finder.synchronize_backend(self, "tempfeat", reset=reset)
- FeatureFinder.synchronize(self,
- feature_detect=feature_detect,
- feature_extract=feature_extract,
- feature_match=feature_match,
- reset=False)
+ FeatureFinder.synchronize(
+ self,
+ feature_detect=feature_detect,
+ feature_extract=feature_extract,
+ feature_match=feature_match,
+ reset=False,
+ )
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
See base method for details.
@@ -2759,9 +3652,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
# use a different lower similarity for the template matching
template_similarity = self.params["tempfeat"]["front_similarity"].value
feature_similarity = self.params["find"]["similarity"].value
- log.debug("Using tempfeat matching with template similarity %s "
- "and feature similarity %s", template_similarity,
- feature_similarity)
+ log.debug(
+ "Using tempfeat matching with template similarity %s "
+ "and feature similarity %s",
+ template_similarity,
+ feature_similarity,
+ )
# class-specific dependencies
import cv2
@@ -2786,8 +3682,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
down = min(haystack.height, up + needle.height)
left = upleft.x
right = min(haystack.width, left + needle.width)
- log.log(9, "Maximum up-down is %s and left-right is %s",
- (up, down), (left, right))
+ log.log(
+ 9,
+ "Maximum up-down is %s and left-right is %s",
+ (up, down),
+ (left, right),
+ )
haystack_region = hgray[up:down, left:right]
haystack_region = haystack_region.copy()
@@ -2799,21 +3699,32 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
self.imglog.hotmaps.append(hotmap_region)
self.imglog.hotmaps.append(hotmap_region)
- res = self._project_features(frame_points, ngray, haystack_region, feature_similarity)
+ res = self._project_features(
+ frame_points, ngray, haystack_region, feature_similarity
+ )
# if the feature matching succeeded or is worse than satisfactory template matching
- if res is not None or (self.imglog.similarities[-1] > 0.0
- and self.imglog.similarities[-1] < self.imglog.similarities[i]
- and self.imglog.similarities[i] > feature_similarity):
+ if res is not None or (
+ self.imglog.similarities[-1] > 0.0
+ and self.imglog.similarities[-1] < self.imglog.similarities[i]
+ and self.imglog.similarities[i] > feature_similarity
+ ):
# take the template matching location rather than the feature one
# for stability (they should ultimately be the same)
- log.debug("Using template result %s instead of the worse feature result %s",
- self.imglog.similarities[i], self.imglog.similarities[-1])
+ log.debug(
+ "Using template result %s instead of the worse feature result %s",
+ self.imglog.similarities[i],
+ self.imglog.similarities[-1],
+ )
location = (left, up)
self.imglog.locations[-1] = location
- feature_maxima.append([self.imglog.hotmaps[-1],
- self.imglog.similarities[-1],
- self.imglog.locations[-1]])
+ feature_maxima.append(
+ [
+ self.imglog.hotmaps[-1],
+ self.imglog.similarities[-1],
+ self.imglog.locations[-1],
+ ]
+ )
# stitch back for a better final image logging
final_hotmap[up:down, left:right] = hotmap_region
@@ -2833,21 +3744,31 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
for i, _ in enumerate(template_maxima):
# test the template match also against the actual required similarity
if self.imglog.similarities[i] >= feature_similarity:
- feature_maxima.append([self.imglog.hotmaps[i],
- self.imglog.similarities[i],
- self.imglog.locations[i]])
+ feature_maxima.append(
+ [
+ self.imglog.hotmaps[i],
+ self.imglog.similarities[i],
+ self.imglog.locations[i],
+ ]
+ )
# release the accumulated logging from subroutines
ImageLogger.accumulate_logging = False
if len(feature_maxima) == 0:
- log.debug("No acceptable match with the given feature similarity %s",
- feature_similarity)
+ log.debug(
+ "No acceptable match with the given feature similarity %s",
+ feature_similarity,
+ )
if len(self.imglog.similarities) > 1:
# NOTE: handle cases when the matching failed at the feature stage, i.e. dump
# a hotmap for debugging also in this case
self.imglog.hotmaps.append(final_hotmap)
- self.imglog.similarities.append(self.imglog.similarities[len(template_maxima)])
- self.imglog.locations.append(self.imglog.locations[len(template_maxima)])
+ self.imglog.similarities.append(
+ self.imglog.similarities[len(template_maxima)]
+ )
+ self.imglog.locations.append(
+ self.imglog.locations[len(template_maxima)]
+ )
elif len(self.imglog.similarities) == 1:
# NOTE: we are only interested in the template hotmap on template failure
self.imglog.hotmaps.append(self.imglog.hotmaps[0])
@@ -2856,14 +3777,27 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
matches = []
from .match import Match
+
maxima = sorted(feature_maxima, key=lambda x: x[1], reverse=True)
for maximum in maxima:
similarity = maximum[1]
x, y = maximum[2]
w, h = needle.width, needle.height
dx, dy = needle.center_offset.x, needle.center_offset.y
- cv2.rectangle(final_hotmap, (x, y), (x+needle.width, y+needle.height), (0, 0, 0), 2)
- cv2.rectangle(final_hotmap, (x, y), (x+needle.width, y+needle.height), (0, 0, 255), 1)
+ cv2.rectangle(
+ final_hotmap,
+ (x, y),
+ (x + needle.width, y + needle.height),
+ (0, 0, 0),
+ 2,
+ )
+ cv2.rectangle(
+ final_hotmap,
+ (x, y),
+ (x + needle.width, y + needle.height),
+ (0, 0, 255),
+ 1,
+ )
matches.append(Match(x, y, w, h, dx, dy, similarity))
self.imglog.hotmaps.append(final_hotmap)
# log one best match for final hotmap filename
@@ -2876,6 +3810,8 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -2889,26 +3825,36 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
# knowing how tempfeat works, this estimates the expected number of cases,
# starting from 1 (i+1), to make sure the winner is the first alphabetically
candidate_num = int(len(self.imglog.similarities) / 2)
for i in range(candidate_num):
- name = "imglog%s-3hotmap-%stemplate-%s.png" % (self.imglog.printable_step,
- i + 1, self.imglog.similarities[i])
+ name = "imglog%s-3hotmap-%stemplate-%s.png" % (
+ self.imglog.printable_step,
+ i + 1,
+ self.imglog.similarities[i],
+ )
self.imglog.dump_hotmap(name, self.imglog.hotmaps[i])
ii = candidate_num + i
- hii = candidate_num + i*4 + 3
- #self.imglog.log_locations(30, [self.imglog.locations[ii]], self.imglog.hotmaps[hii], 4, 255, 0, 0)
- name = "imglog%s-3hotmap-%sfeature-%s.png" % (self.imglog.printable_step,
- i + 1, self.imglog.similarities[ii])
+ hii = candidate_num + i * 4 + 3
+ # self.imglog.log_locations(30, [self.imglog.locations[ii]], self.imglog.hotmaps[hii], 4, 255, 0, 0)
+ name = "imglog%s-3hotmap-%sfeature-%s.png" % (
+ self.imglog.printable_step,
+ i + 1,
+ self.imglog.similarities[ii],
+ )
self.imglog.dump_hotmap(name, self.imglog.hotmaps[hii])
if len(self.imglog.similarities) % 2 == 1:
- name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step,
- self.imglog.similarities[-1])
+ name = "imglog%s-3hotmap-%s.png" % (
+ self.imglog.printable_step,
+ self.imglog.similarities[-1],
+ )
self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1])
self.imglog.clear()
@@ -2926,8 +3872,12 @@ class DeepFinder(Finder):
_cache = {}
- def __init__(self, classifier_datapath: str = ".", configure: bool = True,
- synchronize: bool = True) -> None:
+ def __init__(
+ self,
+ classifier_datapath: str = ".",
+ configure: bool = True,
+ synchronize: bool = True,
+ ) -> None:
"""Build a CV backend using OpenCV's text matching options."""
super(DeepFinder, self).__init__(configure=False, synchronize=False)
@@ -2944,22 +3894,22 @@ def __init__(self, classifier_datapath: str = ".", configure: bool = True,
if synchronize:
self.__synchronize_backend(reset=False)
- def __configure_backend(self, backend: str = None, category: str = "deep",
- reset: bool = False) -> None:
- """
- Custom implementation of the base method.
-
- See base method for details.
- """
+ def __configure_backend(
+ self, backend: str = None, category: str = "deep", reset: bool = False
+ ) -> None:
if category != "deep":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(DeepFinder, self).configure_backend("deep", reset=True)
if backend is None:
backend = GlobalConfig.deep_learn_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
self.params[category] = {}
self.params[category]["backend"] = backend
@@ -2973,23 +3923,31 @@ def __configure_backend(self, backend: str = None, category: str = "deep",
# file to load pre-trained model weights from
self.params[category]["model"] = CVParameter("")
- def configure_backend(self, backend: str = None, category: str = "deep",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "deep", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "deep",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "deep", reset: bool = False
+ ) -> None:
if category != "deep":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(DeepFinder, self).synchronize_backend("deep", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
backend = self.params[category]["backend"]
# reuse or cache a unique model depending on arch and checkpoint
@@ -3010,12 +3968,14 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep",
else:
# only models pretrained on the COCO dataset are available
is_pretrained = model_checkpoint == "" and model_classes == 91
- model = models.__dict__[model_arch](pretrained=is_pretrained,
- num_classes=model_classes)
+ model = models.__dict__[model_arch](
+ pretrained=is_pretrained, num_classes=model_classes
+ )
# load .pth or .pkl data file if pretrained model is available
if model_checkpoint:
- model.load_state_dict(torch.load(model_checkpoint,
- map_location="cpu"))
+ model.load_state_dict(
+ torch.load(model_checkpoint, map_location="cpu")
+ )
self._cache[model_id] = model
device_opt = self.params[category]["device"].value
@@ -3031,19 +3991,20 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep",
elif backend == "tensorflow":
# class-specific dependencies
import tensorflow as tf
+
tf.keras.backend.clear_session()
# TODO: current TensorFlow model zoo/garden API is too unstable
from research.object_detection.utils import config_util
from research.object_detection.builders import model_builder
# TODO: the model ARCH and CHECKPOINT need extra path flexibility
- #tf_models_dir = 'models/research/object_detection'
- #model_arch = os.path.join(tf_models_dir, 'configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config')
- #model_checkpoint = os.path.join(tf_models_dir, 'test_data/checkpoint/ckpt-0')
+ # tf_models_dir = 'models/research/object_detection'
+ # model_arch = os.path.join(tf_models_dir, 'configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config')
+ # model_checkpoint = os.path.join(tf_models_dir, 'test_data/checkpoint/ckpt-0')
# load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(model_arch)
- model_config = configs['model']
+ model_config = configs["model"]
self.net = model_builder.build(model_config=model_config, is_training=False)
ckpt = tf.compat.v2.train.Checkpoint(model=self.net)
@@ -3052,9 +4013,12 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep",
else:
raise ValueError("Invalid DL backend '%s'" % backend)
- def synchronize_backend(self, backend: str = None, category: str = "deep",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "deep", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -3063,6 +4027,8 @@ def synchronize_backend(self, backend: str = None, category: str = "deep",
def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
:param needle: target pattern (cascade) to search for
@@ -3083,24 +4049,30 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
backend = self.params["deep"]["backend"]
if backend == "tensorflow":
- raise NotImplementedError("The TensorFlow model zoo/garden libary "
- "is too unstable at present")
+ raise NotImplementedError(
+ "The TensorFlow model zoo/garden libary " "is too unstable at present"
+ )
assert backend == "pytorch", "Only PyTorch model zoo/garden is supported"
import torch
- classes: Callable[[Any], str] = None
+
if needle.data_file is not None:
with open(needle.data_file, "rt") as f:
classes_list = [line.rstrip() for line in f.readlines()]
- classes = lambda x: classes_list[x]
+
+ def classes(x: int) -> str:
+ return classes_list[x]
+
else:
# an infinite list as a string identity map
- classes = lambda x: str(x)
+ def classes(x: Any) -> str:
+ return str(x)
# set the module in evaluation mode
self.net.eval()
# convert haystack data to tensor variable
from torchvision import transforms
+
img = haystack.pil_image
transform = transforms.Compose([transforms.ToTensor()])
img = transform(img)
@@ -3113,19 +4085,25 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
matches = []
from .match import Match
- for i in range(len(pred[0]['labels'])):
- label = classes(pred[0]['labels'][i].cpu().item())
- score = pred[0]['scores'][i].cpu().item()
- x, y, w, h = list(pred[0]['boxes'][i].cpu().numpy())
- rect = (int(x), int(y), int(x+w), int(y+h))
+
+ for i in range(len(pred[0]["labels"])):
+ label = classes(pred[0]["labels"][i].cpu().item())
+ score = pred[0]["scores"][i].cpu().item()
+ x, y, w, h = list(pred[0]["boxes"][i].cpu().numpy())
+ rect = (int(x), int(y), int(x + w), int(y + h))
from PIL import ImageDraw
+
draw = ImageDraw.Draw(full_hotmap)
draw.rectangle(rect, outline=(255, 0, 0))
draw.text((rect[0], rect[1]), label, fill=(255, 0, 0, 0))
if score < similarity:
- logging.debug("Found %s has a low confidence score %s<%s, skipping",
- label, score, similarity)
+ logging.debug(
+ "Found %s has a low confidence score %s<%s, skipping",
+ label,
+ score,
+ similarity,
+ )
continue
draw = ImageDraw.Draw(filtered_hotmap)
draw.rectangle(rect, outline=(0, 255, 0))
@@ -3133,8 +4111,9 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
if label != needle_class:
logging.debug("Found %s is not %s, skipping", label, needle_class)
continue
- logging.debug("Found %s with sufficient confidence %s at (%s, %s)",
- label, score, x, y)
+ logging.debug(
+ "Found %s with sufficient confidence %s at (%s, %s)", label, score, x, y
+ )
draw = ImageDraw.Draw(final_hotmap)
draw.rectangle(rect, outline=(0, 0, 255))
@@ -3151,6 +4130,8 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]":
def log(self, lvl: int) -> None:
"""
+ Log images with an arbitrary logging level.
+
Custom implementation of the base method.
See base method for details.
@@ -3164,14 +4145,22 @@ def log(self, lvl: int) -> None:
return
# no hotmaps to log
elif len(self.imglog.hotmaps) == 0:
- raise MissingHotmapError("No matching was performed in order to be image logged")
-
- self.imglog.dump_hotmap("imglog%s-3hotmap-1full.png" % self.imglog.printable_step,
- self.imglog.hotmaps[0])
- self.imglog.dump_hotmap("imglog%s-3hotmap-2filtered.png" % self.imglog.printable_step,
- self.imglog.hotmaps[1])
-
- similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ raise MissingHotmapError(
+ "No matching was performed in order to be image logged"
+ )
+
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-1full.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[0],
+ )
+ self.imglog.dump_hotmap(
+ "imglog%s-3hotmap-2filtered.png" % self.imglog.printable_step,
+ self.imglog.hotmaps[1],
+ )
+
+ similarity = (
+ self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0
+ )
name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity)
self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1])
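
For context, the detection flow above reduces to the standard torchvision pattern sketched below. This is only a hedged, standalone sketch against plain torchvision: the model choice, confidence cutoff, and file name are illustrative assumptions, not values taken from the patch.

import torch
from torchvision import models, transforms
from PIL import Image, ImageDraw

# COCO-pretrained detector with 91 classes, mirroring the is_pretrained branch above
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

img = Image.open("haystack.png").convert("RGB")        # hypothetical screenshot
tensor = transforms.Compose([transforms.ToTensor()])(img)
with torch.no_grad():
    pred = model([tensor])

draw = ImageDraw.Draw(img)
for label, score, box in zip(pred[0]["labels"], pred[0]["scores"], pred[0]["boxes"]):
    if score.item() < 0.7:                             # similarity-style confidence cutoff
        continue
    draw.rectangle(box.tolist(), outline=(0, 0, 255))
    draw.text((box[0].item(), box[1].item()), str(label.item()), fill=(0, 0, 255))
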
@@ -3195,7 +4184,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
# available and currently fully compatible methods
self.categories["hybrid"] = "hybrid_methods"
- self.algorithms["hybrid_methods"] = ("autopy", "contour", "template", "feature", "tempfeat")
+ self.algorithms["hybrid_methods"] = (
+ "autopy",
+ "contour",
+ "template",
+ "feature",
+ "tempfeat",
+ )
# other attributes
self.matcher = None
@@ -3206,39 +4201,52 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None:
if synchronize:
self.__synchronize_backend(reset=False)
- def __configure_backend(self, backend: str = None, category: str = "hybrid",
- reset: bool = False) -> None:
+ def __configure_backend(
+ self, backend: str = None, category: str = "hybrid", reset: bool = False
+ ) -> None:
if category != "hybrid":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
# backends are the same as the ones for the base class
super(HybridFinder, self).configure_backend(backend=backend, reset=True)
if backend is None:
backend = GlobalConfig.hybrid_match_backend
if backend not in self.algorithms[self.categories[category]]:
- raise UnsupportedBackendError("Backend '%s' is not among the supported ones: "
- "%s" % (backend, self.algorithms[self.categories[category]]))
+ raise UnsupportedBackendError(
+ "Backend '%s' is not among the supported ones: "
+ "%s" % (backend, self.algorithms[self.categories[category]])
+ )
self.params[category] = {}
self.params[category]["backend"] = backend
- def configure_backend(self, backend: str = None, category: str = "hybrid",
- reset: bool = False) -> None:
+ def configure_backend(
+ self, backend: str = None, category: str = "hybrid", reset: bool = False
+ ) -> None:
"""
+ Generate configuration dictionary for a given backend.
+
Custom implementation of the base method.
See base method for details.
"""
self.__configure_backend(backend, category, reset)
- def __synchronize_backend(self, backend: str = None, category: str = "hybrid",
- reset: bool = False) -> None:
+ def __synchronize_backend(
+ self, backend: str = None, category: str = "hybrid", reset: bool = False
+ ) -> None:
if category != "hybrid":
- raise UnsupportedBackendError("Backend category '%s' is not supported" % category)
+ raise UnsupportedBackendError(
+ "Backend category '%s' is not supported" % category
+ )
if reset:
super(HybridFinder, self).synchronize_backend("hybrid", reset=True)
if backend is not None and self.params[category]["backend"] != backend:
- raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend)
+ raise UninitializedBackendError(
+ "Backend '%s' has not been configured yet" % backend
+ )
backend = self.params[category]["backend"]
# default matcher in case of a simple chain without own matching config
@@ -3259,9 +4267,12 @@ def __synchronize_backend(self, backend: str = None, category: str = "hybrid",
elif backend == "deep":
self.matcher = DeepFinder()
- def synchronize_backend(self, backend: str = None, category: str = "hybrid",
- reset: bool = False) -> None:
+ def synchronize_backend(
+ self, backend: str = None, category: str = "hybrid", reset: bool = False
+ ) -> None:
"""
+ Synchronize a category backend with the equalizer configuration.
+
Custom implementation of the base method.
See base method for details.
@@ -3270,6 +4281,8 @@ def synchronize_backend(self, backend: str = None, category: str = "hybrid",
def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
"""
+ Find all needle targets in a haystack image.
+
Custom implementation of the base method.
See base method for details.
@@ -3283,7 +4296,9 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]":
for step_needle in needle:
- if step_needle.use_own_settings and not isinstance(step_needle.match_settings, HybridFinder):
+ if step_needle.use_own_settings and not isinstance(
+ step_needle.match_settings, HybridFinder
+ ):
matcher = step_needle.match_settings
else:
matcher = self.matcher
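
A hedged usage sketch of the hybrid configuration above; the backend names come from the hybrid_methods tuple and the constructor flags from the __init__ signature in this hunk.

from guibot.finder import HybridFinder
from guibot.errors import UnsupportedBackendError

finder = HybridFinder(configure=True, synchronize=False)
finder.configure_backend(backend="template", category="hybrid")  # listed in hybrid_methods
try:
    finder.configure_backend(backend="surf", category="hybrid")
except UnsupportedBackendError:
    pass  # "surf" is not among the supported hybrid backends
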
diff --git a/guibot/guibot.py b/guibot/guibot.py
index 47154436..ce89282d 100644
--- a/guibot/guibot.py
+++ b/guibot/guibot.py
@@ -14,14 +14,13 @@
# along with guibot. If not, see .
"""
+Main guibot interface for GUI automation.
SUMMARY
------------------------------------------------------
-Main guibot interface for GUI automation.
This frontend is recommended for use in most normal cases.
-
INTERFACE
------------------------------------------------------
@@ -35,14 +34,13 @@
from .finder import Finder
-log = logging.getLogger('guibot')
+log = logging.getLogger("guibot")
log.addHandler(logging.NullHandler())
class GuiBot(Region):
"""
- The main guibot object is the root (first and screen wide) region
- with some convenience functions added.
+ The main guibot object is the root (first and screen-wide) region with some convenience functions added.
.. seealso:: Real API is inherited from :py:class:`region.Region`.
"""
@@ -63,8 +61,7 @@ def __init__(self, dc: Controller = None, cv: Finder = None) -> None:
def add_path(self, directory: str) -> None:
"""
- Add a path to the list of currently accessible paths
- if it wasn't already added.
+ Add a path to the list of currently accessible paths if it wasn't already added.
:param directory: path to add
"""
diff --git a/guibot/guibot_proxy.py b/guibot/guibot_proxy.py
index b30c0ca6..0a986e89 100644
--- a/guibot/guibot_proxy.py
+++ b/guibot/guibot_proxy.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Remote guibot interface for proxy operations using remote visual objects.
SUMMARY
------------------------------------------------------
-Remote guibot interface for proxy operations using remote visual objects.
Frontend with serialization compatible API allowing the use of PyRO modified
:py:class:`guibot.GuiBot` object (creating and running the same object
@@ -25,7 +25,6 @@
this object with some additional postprocessing to make the execution remote so
for information about the API please refer to it and :py:class:`region.Region`.
-
INTERFACE
------------------------------------------------------
@@ -45,10 +44,11 @@
from .controller import Controller
-def serialize_custom_error(class_obj: type) -> dict[str, "str | getset_descriptor | dictproxy"]:
+def serialize_custom_error(
+ class_obj: type,
+) -> dict[str, "str | getset_descriptor | dictproxy"]:
"""
- Serialization method for the :py:class:`errors.UnsupportedBackendError`
- which was chosen just as a sample.
+ Serialize the :py:class:`errors.UnsupportedBackendError`, chosen here just as a sample.
:param class_obj: class object for the serialized error class
:returns: serialization dictionary with the class name, arguments, and attributes
@@ -62,21 +62,25 @@ def serialize_custom_error(class_obj: type) -> dict[str, "str | getset_descripto
def register_exception_serialization() -> None:
"""
- We put here any exceptions that are too complicated for the default serialization
- and define their serialization methods.
+ Register custom serialization methods for exceptions that are too complicated for the default serialization.
.. note:: This would not be needed if we were using the Pickle serializer but its
security problems at the moment made us prefer the serpent serializer paying
for it with some extra setup steps and functions below.
"""
for exception in [errors.UnsupportedBackendError]:
- pyro.util.SerializerBase.register_class_to_dict(exception, serialize_custom_error)
+ pyro.util.SerializerBase.register_class_to_dict(
+ exception, serialize_custom_error
+ )
class GuiBotProxy(GuiBot):
"""
- The proxy guibot object is just a wrapper around the actual guibot
- object that takes care of returning easily serializable PyRO proxy objects
+ The proxy guibot object takes care of returning easily serializable PyRO proxy objects.
+
+ It is just a wrapper around the actual guibot object that returns proxy objects
instead of the real ones or their serialized copies.
It allows you to move the mouse, type text and do any other GuiBot action
@@ -159,7 +163,7 @@ def click(self, *args: tuple[type, ...], **kwargs: dict[str, type]) -> str:
"""See :py:class:`guibot.guibot.GuiBot` and its inherited :py:class:`guibot.region.Region` for details."""
return self._proxify(super(GuiBotProxy, self).click(*args, **kwargs))
- def right_click(self,*args: tuple[type, ...], **kwargs: dict[str, type]) -> str:
+ def right_click(self, *args: tuple[type, ...], **kwargs: dict[str, type]) -> str:
"""See :py:class:`guibot.guibot.GuiBot` and its inherited :py:class:`guibot.region.Region` for details."""
return self._proxify(super(GuiBotProxy, self).right_click(*args, **kwargs))
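
A hedged usage sketch: the registration helper above is meant to be called once before exporting objects over PyRO.

from guibot.guibot_proxy import register_exception_serialization

# maps UnsupportedBackendError to serialize_custom_error for the serpent serializer
register_exception_serialization()
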
diff --git a/guibot/guibot_simple.py b/guibot/guibot_simple.py
index 361ab0e2..2f3ca0a1 100644
--- a/guibot/guibot_simple.py
+++ b/guibot/guibot_simple.py
@@ -14,17 +14,16 @@
# along with guibot. If not, see .
"""
+Simple guibot interface for short scripts, examples, and basic GUI automation.
SUMMARY
------------------------------------------------------
-Simple guibot interface for short scripts, examples, and basic GUI automation.
Frontend with simple procedural API allowing the use of a module instead of
the :py:class:`guibot.GuiBot` object (creating and running this same
object internally). All the methods delegate their calls to this object so
for information about the API please refer to it and :py:class:`region.Region`.
-
INTERFACE
------------------------------------------------------
@@ -41,7 +40,7 @@
# accessible attributes of this module
guibot = None
last_match = None
-buttons = namedtuple('buttons', ["mouse", "key", "mod"])
+buttons = namedtuple("buttons", ["mouse", "key", "mod"])
def initialize() -> None:
@@ -60,7 +59,9 @@ def initialize() -> None:
def check_initialized() -> None:
"""Make sure the simple API is initialized."""
if guibot is None:
- raise AssertionError("Guibot module not initialized - run initialize() before using the simple API")
+ raise AssertionError(
+ "Guibot module not initialized - run initialize() before using the simple API"
+ )
def add_path(*args: tuple[type, ...], **kwargs: dict[str, type]) -> None:
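
A hedged sketch of the simple procedural API above (assumes a usable display; "images" is a hypothetical directory):

import guibot.guibot_simple as simple

simple.initialize()         # builds the module-level GuiBot instance
simple.check_initialized()  # would raise AssertionError if initialize() were skipped
simple.add_path("images")   # delegates to the underlying GuiBot object
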
diff --git a/guibot/imagelogger.py b/guibot/imagelogger.py
index 9e460ac6..f8ebc167 100644
--- a/guibot/imagelogger.py
+++ b/guibot/imagelogger.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Image logging for enhanced debugging and verbosity of guibot's operation.
SUMMARY
------------------------------------------------------
-Image logging for enhanced debugging and verbosity of guibot's operation.
INTERFACE
@@ -82,6 +82,7 @@ def get_printable_step(self) -> str:
:returns: step number prepended with zeroes to obtain a fixed length enumeration
"""
return ("%0" + str(ImageLogger.step_width) + "d") % ImageLogger.step
+
printable_step = property(fget=get_printable_step)
def debug(self) -> None:
@@ -119,16 +120,15 @@ def dump_matched_images(self) -> None:
shutil.rmtree(ImageLogger.logging_destination)
os.mkdir(ImageLogger.logging_destination)
- needle_name = "imglog%s-1needle-%s" % (self.printable_step,
- str(self.needle))
- needle_path = os.path.join(ImageLogger.logging_destination,
- needle_name)
+ needle_name = "imglog%s-1needle-%s" % (self.printable_step, str(self.needle))
+ needle_path = os.path.join(ImageLogger.logging_destination, needle_name)
self.needle.save(needle_path)
- haystack_name = "imglog%s-2haystack-%s" % (self.printable_step,
- str(self.haystack))
- haystack_path = os.path.join(ImageLogger.logging_destination,
- haystack_name)
+ haystack_name = "imglog%s-2haystack-%s" % (
+ self.printable_step,
+ str(self.haystack),
+ )
+ haystack_path = os.path.join(ImageLogger.logging_destination, haystack_name)
self.haystack.save(haystack_path)
def dump_hotmap(self, name: str, hotmap: PIL.Image.Image | numpy.ndarray) -> None:
@@ -150,8 +150,8 @@ def dump_hotmap(self, name: str, hotmap: PIL.Image.Image | numpy.ndarray) -> Non
# numpy or other array
pil_image = PIL.Image.fromarray(hotmap)
# NOTE: some modes cannot be saved unless converted to RGB
- if pil_image.mode != 'RGB':
- pil_image = pil_image.convert('RGB')
+ if pil_image.mode != "RGB":
+ pil_image = pil_image.convert("RGB")
pil_image.save(path, compress_level=GlobalConfig.image_quality)
def clear(self) -> None:
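
A standalone sketch of the hotmap dumping logic above, using only numpy and PIL; the compress_level value is an assumed stand-in for GlobalConfig.image_quality.

import numpy
import PIL.Image

hotmap = numpy.zeros((32, 32), dtype=numpy.uint8)  # grayscale array standing in for a hotmap
pil_image = PIL.Image.fromarray(hotmap)
# some modes cannot be saved unless converted to RGB (as noted in the diff)
if pil_image.mode != "RGB":
    pil_image = pil_image.convert("RGB")
pil_image.save("hotmap-demo.png", compress_level=3)
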
diff --git a/guibot/inputmap.py b/guibot/inputmap.py
index bb98b42b..2a65741a 100644
--- a/guibot/inputmap.py
+++ b/guibot/inputmap.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Key mappings, modifiers, and mouse buttons.
SUMMARY
------------------------------------------------------
-Key mappings, modifiers, and mouse buttons.
INTERFACE
@@ -111,8 +111,7 @@ def __init__(self) -> None:
def to_string(self, key: str) -> str:
"""
- Provide with a text representation of a desired key
- according to the custom BC backend.
+ Provide a text representation of a desired key according to the custom BC backend.
:param key: selected key name according to the custom backend
:returns: text representation of the selected key
@@ -120,75 +119,77 @@ def to_string(self, key: str) -> str:
"""
if key is None:
raise ValueError("The key %s does not exist in the current key map" % key)
- return {self.ENTER: "Enter",
- self.TAB: "Tab",
- self.ESC: "Esc",
- self.BACKSPACE: "Backspace",
- self.DELETE: "Delete",
- self.INSERT: "Insert",
- self.CTRL: "Ctrl",
- self.ALT: "Alt",
- self.SHIFT: "Shift",
- self.META: "Meta",
- self.RCTRL: "RightControl",
- self.RALT: "RightAlt",
- self.RSHIFT: "RightShift",
- self.RMETA: "RightMeta",
- self.F1: "F1",
- self.F2: "F2",
- self.F3: "F3",
- self.F4: "F4",
- self.F5: "F5",
- self.F6: "F6",
- self.F7: "F7",
- self.F8: "F8",
- self.F9: "F9",
- self.F10: "F10",
- self.F11: "F11",
- self.F12: "F12",
- self.F13: "F13",
- self.F14: "F14",
- self.F15: "F15",
- self.F16: "F16",
- self.F17: "F17",
- self.F18: "F18",
- self.F19: "F19",
- self.F20: "F20",
- self.HOME: "Home",
- self.END: "End",
- self.LEFT: "Left",
- self.RIGHT: "Right",
- self.UP: "Up",
- self.DOWN: "Down",
- self.PAGE_DOWN: "Page Down",
- self.PAGE_UP: "Page Up",
- self.CAPS_LOCK: "Caps Lock",
- self.PRINTSCREEN: "Print Screen",
- self.PAUSE: "Pause",
- self.SCROLL_LOCK: "Scroll Lock",
- self.NUM_LOCK: "Num Lock",
- self.SYS_REQ: "Sys Req",
- self.SUPER: "Super",
- self.RSUPER: "RightSuper",
- self.HYPER: "Hyper",
- self.RHYPER: "RightHyper",
- self.MENU: "Menu",
- self.KP0: "KeyPad Number 0",
- self.KP1: "KeyPad Number 1",
- self.KP2: "KeyPad Number 2",
- self.KP3: "KeyPad Number 3",
- self.KP4: "KeyPad Number 4",
- self.KP5: "KeyPad Number 5",
- self.KP6: "KeyPad Number 6",
- self.KP7: "KeyPad Number 7",
- self.KP8: "KeyPad Number 8",
- self.KP9: "KeyPad Number 9",
- self.KP_ADD: "KeyPad Add",
- self.KP_DECIMAL: "KeyPad Decimal",
- self.KP_DIVIDE: "KeyPad Divide",
- self.KP_ENTER: "KeyPad Enter",
- self.KP_MULTIPLY: "KeyPad Multiply",
- self.KP_SUBTRACT: "KeyPad Subtract"}[key]
+ return {
+ self.ENTER: "Enter",
+ self.TAB: "Tab",
+ self.ESC: "Esc",
+ self.BACKSPACE: "Backspace",
+ self.DELETE: "Delete",
+ self.INSERT: "Insert",
+ self.CTRL: "Ctrl",
+ self.ALT: "Alt",
+ self.SHIFT: "Shift",
+ self.META: "Meta",
+ self.RCTRL: "RightControl",
+ self.RALT: "RightAlt",
+ self.RSHIFT: "RightShift",
+ self.RMETA: "RightMeta",
+ self.F1: "F1",
+ self.F2: "F2",
+ self.F3: "F3",
+ self.F4: "F4",
+ self.F5: "F5",
+ self.F6: "F6",
+ self.F7: "F7",
+ self.F8: "F8",
+ self.F9: "F9",
+ self.F10: "F10",
+ self.F11: "F11",
+ self.F12: "F12",
+ self.F13: "F13",
+ self.F14: "F14",
+ self.F15: "F15",
+ self.F16: "F16",
+ self.F17: "F17",
+ self.F18: "F18",
+ self.F19: "F19",
+ self.F20: "F20",
+ self.HOME: "Home",
+ self.END: "End",
+ self.LEFT: "Left",
+ self.RIGHT: "Right",
+ self.UP: "Up",
+ self.DOWN: "Down",
+ self.PAGE_DOWN: "Page Down",
+ self.PAGE_UP: "Page Up",
+ self.CAPS_LOCK: "Caps Lock",
+ self.PRINTSCREEN: "Print Screen",
+ self.PAUSE: "Pause",
+ self.SCROLL_LOCK: "Scroll Lock",
+ self.NUM_LOCK: "Num Lock",
+ self.SYS_REQ: "Sys Req",
+ self.SUPER: "Super",
+ self.RSUPER: "RightSuper",
+ self.HYPER: "Hyper",
+ self.RHYPER: "RightHyper",
+ self.MENU: "Menu",
+ self.KP0: "KeyPad Number 0",
+ self.KP1: "KeyPad Number 1",
+ self.KP2: "KeyPad Number 2",
+ self.KP3: "KeyPad Number 3",
+ self.KP4: "KeyPad Number 4",
+ self.KP5: "KeyPad Number 5",
+ self.KP6: "KeyPad Number 6",
+ self.KP7: "KeyPad Number 7",
+ self.KP8: "KeyPad Number 8",
+ self.KP9: "KeyPad Number 9",
+ self.KP_ADD: "KeyPad Add",
+ self.KP_DECIMAL: "KeyPad Decimal",
+ self.KP_DIVIDE: "KeyPad Divide",
+ self.KP_ENTER: "KeyPad Enter",
+ self.KP_MULTIPLY: "KeyPad Multiply",
+ self.KP_SUBTRACT: "KeyPad Subtract",
+ }[key]
class AutoPyKey(Key):
@@ -285,58 +286,58 @@ def __init__(self) -> None:
"""Build an instance containing the key map for the xdotool backend."""
super().__init__()
- self.ENTER = 'Return' # also 'enter'
- self.TAB = 'Tab'
- self.ESC = 'Escape'
- self.BACKSPACE = 'BackSpace'
- self.DELETE = 'Delete'
- self.INSERT = 'Insert'
-
- self.CTRL = 'ctrl' # special handling
- self.ALT = 'alt' # special handling
- self.SHIFT = 'shift' # special handling
- self.META = 'meta' # special handling
- self.RCTRL = 'CtrlR'
- self.RALT = 'AltR'
- self.RSHIFT = 'ShiftR'
- self.RMETA = 'MetaR'
-
- self.F1 = 'F1'
- self.F2 = 'F2'
- self.F3 = 'F3'
- self.F4 = 'F4'
- self.F5 = 'F5'
- self.F6 = 'F6'
- self.F7 = 'F7'
- self.F8 = 'F8'
- self.F9 = 'F9'
- self.F10 = 'F10'
- self.F11 = 'F11'
- self.F12 = 'F12'
- self.F13 = 'F13'
- self.F14 = 'F14'
- self.F15 = 'F15'
- self.F16 = 'F16'
- self.F17 = 'F17'
- self.F18 = 'F18'
- self.F19 = 'F19'
- self.F20 = 'F20'
-
- self.HOME = 'Home'
- self.END = 'End'
- self.LEFT = 'Left'
- self.RIGHT = 'Right'
- self.UP = 'Up'
- self.DOWN = 'Down'
- self.PAGE_DOWN = 'Page_Down'
- self.PAGE_UP = 'Page_Up'
-
- self.CAPS_LOCK = 'Caps_Lock'
+ self.ENTER = "Return" # also 'enter'
+ self.TAB = "Tab"
+ self.ESC = "Escape"
+ self.BACKSPACE = "BackSpace"
+ self.DELETE = "Delete"
+ self.INSERT = "Insert"
+
+ self.CTRL = "ctrl" # special handling
+ self.ALT = "alt" # special handling
+ self.SHIFT = "shift" # special handling
+ self.META = "meta" # special handling
+ self.RCTRL = "CtrlR"
+ self.RALT = "AltR"
+ self.RSHIFT = "ShiftR"
+ self.RMETA = "MetaR"
+
+ self.F1 = "F1"
+ self.F2 = "F2"
+ self.F3 = "F3"
+ self.F4 = "F4"
+ self.F5 = "F5"
+ self.F6 = "F6"
+ self.F7 = "F7"
+ self.F8 = "F8"
+ self.F9 = "F9"
+ self.F10 = "F10"
+ self.F11 = "F11"
+ self.F12 = "F12"
+ self.F13 = "F13"
+ self.F14 = "F14"
+ self.F15 = "F15"
+ self.F16 = "F16"
+ self.F17 = "F17"
+ self.F18 = "F18"
+ self.F19 = "F19"
+ self.F20 = "F20"
+
+ self.HOME = "Home"
+ self.END = "End"
+ self.LEFT = "Left"
+ self.RIGHT = "Right"
+ self.UP = "Up"
+ self.DOWN = "Down"
+ self.PAGE_DOWN = "Page_Down"
+ self.PAGE_UP = "Page_Up"
+
+ self.CAPS_LOCK = "Caps_Lock"
# TODO: 'print screen' is not available
self.PRINTSCREEN = None
- self.PAUSE = 'Pause'
- self.SCROLL_LOCK = 'Scroll_Lock'
- self.NUM_LOCK = 'Num_Lock'
+ self.PAUSE = "Pause"
+ self.SCROLL_LOCK = "Scroll_Lock"
+ self.NUM_LOCK = "Num_Lock"
# TODO: the following are not available
self.SYS_REQ = None
self.SUPER = None
@@ -346,21 +347,21 @@ def __init__(self) -> None:
# TODO: 'menu' is not available
self.MENU = None
- self.KP0 = 'KP_0'
- self.KP1 = 'KP_1'
- self.KP2 = 'KP_2'
- self.KP3 = 'KP_3'
- self.KP4 = 'KP_4'
- self.KP5 = 'KP_5'
- self.KP6 = 'KP_6'
- self.KP7 = 'KP_7'
- self.KP8 = 'KP_8'
- self.KP9 = 'KP_9'
- self.KP_ENTER = 'KP_Enter'
- self.KP_DIVIDE = 'KP_Divide'
- self.KP_MULTIPLY = 'KP_Multiply'
- self.KP_SUBTRACT = 'KP_Subtract'
- self.KP_ADD = 'KP_Add'
+ self.KP0 = "KP_0"
+ self.KP1 = "KP_1"
+ self.KP2 = "KP_2"
+ self.KP3 = "KP_3"
+ self.KP4 = "KP_4"
+ self.KP5 = "KP_5"
+ self.KP6 = "KP_6"
+ self.KP7 = "KP_7"
+ self.KP8 = "KP_8"
+ self.KP9 = "KP_9"
+ self.KP_ENTER = "KP_Enter"
+ self.KP_DIVIDE = "KP_Divide"
+ self.KP_MULTIPLY = "KP_Multiply"
+ self.KP_SUBTRACT = "KP_Subtract"
+ self.KP_ADD = "KP_Add"
self.KP_DECIMAL = None
@@ -373,77 +374,77 @@ def __init__(self) -> None:
# TODO: it would be preferable to translate directly to RBF like
# 'ENTER = rfb.KEY_Return' but this is internal for the vncdotool
- self.ENTER = 'return' # also 'enter'
- self.TAB = 'tab'
- self.ESC = 'esc'
- self.BACKSPACE = 'bsp'
- self.DELETE = 'del' # also 'delete'
- self.INSERT = 'ins'
-
- self.CTRL = 'ctrl' # also 'lctrl'
- self.ALT = 'alt' # also 'lalt'
- self.SHIFT = 'shift' # also 'lshift'
- self.META = 'meta' # also 'lmeta'
- self.RCTRL = 'rctrl'
- self.RALT = 'ralt'
- self.RSHIFT = 'rshift'
- self.RMETA = 'rmeta'
-
- self.F1 = 'f1'
- self.F2 = 'f2'
- self.F3 = 'f3'
- self.F4 = 'f4'
- self.F5 = 'f5'
- self.F6 = 'f6'
- self.F7 = 'f7'
- self.F8 = 'f8'
- self.F9 = 'f9'
- self.F10 = 'f10'
- self.F11 = 'f11'
- self.F12 = 'f12'
- self.F13 = 'f13'
- self.F14 = 'f14'
- self.F15 = 'f15'
- self.F16 = 'f16'
- self.F17 = 'f17'
- self.F18 = 'f18'
- self.F19 = 'f19'
- self.F20 = 'f20'
-
- self.HOME = 'home'
- self.END = 'end'
- self.LEFT = 'left'
- self.RIGHT = 'right'
- self.UP = 'up'
- self.DOWN = 'down'
- self.PAGE_DOWN = 'pgdn'
- self.PAGE_UP = 'pgup'
-
- self.CAPS_LOCK = 'caplk'
+ self.ENTER = "return" # also 'enter'
+ self.TAB = "tab"
+ self.ESC = "esc"
+ self.BACKSPACE = "bsp"
+ self.DELETE = "del" # also 'delete'
+ self.INSERT = "ins"
+
+ self.CTRL = "ctrl" # also 'lctrl'
+ self.ALT = "alt" # also 'lalt'
+ self.SHIFT = "shift" # also 'lshift'
+ self.META = "meta" # also 'lmeta'
+ self.RCTRL = "rctrl"
+ self.RALT = "ralt"
+ self.RSHIFT = "rshift"
+ self.RMETA = "rmeta"
+
+ self.F1 = "f1"
+ self.F2 = "f2"
+ self.F3 = "f3"
+ self.F4 = "f4"
+ self.F5 = "f5"
+ self.F6 = "f6"
+ self.F7 = "f7"
+ self.F8 = "f8"
+ self.F9 = "f9"
+ self.F10 = "f10"
+ self.F11 = "f11"
+ self.F12 = "f12"
+ self.F13 = "f13"
+ self.F14 = "f14"
+ self.F15 = "f15"
+ self.F16 = "f16"
+ self.F17 = "f17"
+ self.F18 = "f18"
+ self.F19 = "f19"
+ self.F20 = "f20"
+
+ self.HOME = "home"
+ self.END = "end"
+ self.LEFT = "left"
+ self.RIGHT = "right"
+ self.UP = "up"
+ self.DOWN = "down"
+ self.PAGE_DOWN = "pgdn"
+ self.PAGE_UP = "pgup"
+
+ self.CAPS_LOCK = "caplk"
# TODO: 'print screen' is not available
self.PRINTSCREEN = None
- self.PAUSE = 'pause'
- self.SCROLL_LOCK = 'scrlk'
- self.NUM_LOCK = 'numlk'
- self.SYS_REQ = 'sysrq'
- self.SUPER = 'super' # also 'lsuper'
- self.RSUPER = 'rsuper'
- self.HYPER = 'hyper' # also 'lhyper'
- self.RHYPER = 'rhyper'
+ self.PAUSE = "pause"
+ self.SCROLL_LOCK = "scrlk"
+ self.NUM_LOCK = "numlk"
+ self.SYS_REQ = "sysrq"
+ self.SUPER = "super" # also 'lsuper'
+ self.RSUPER = "rsuper"
+ self.HYPER = "hyper" # also 'lhyper'
+ self.RHYPER = "rhyper"
# TODO: 'menu' is not available
self.MENU = None
- self.KP0 = 'kp0'
- self.KP1 = 'kp1'
- self.KP2 = 'kp2'
- self.KP3 = 'kp3'
- self.KP4 = 'kp4'
- self.KP5 = 'kp5'
- self.KP6 = 'kp6'
- self.KP7 = 'kp7'
- self.KP8 = 'kp8'
- self.KP9 = 'kp9'
- self.KP_ENTER = 'kpenter'
+ self.KP0 = "kp0"
+ self.KP1 = "kp1"
+ self.KP2 = "kp2"
+ self.KP3 = "kp3"
+ self.KP4 = "kp4"
+ self.KP5 = "kp5"
+ self.KP6 = "kp6"
+ self.KP7 = "kp7"
+ self.KP8 = "kp8"
+ self.KP9 = "kp9"
+ self.KP_ENTER = "kpenter"
# TODO: these are not available
self.KP_DIVIDE = None
self.KP_MULTIPLY = None
@@ -461,59 +462,59 @@ def __init__(self) -> None:
# TODO: it would be preferable to translate directly to RBF like
# 'ENTER = rfb.KEY_Return' but this is internal for the vncdotool
- self.ENTER = 'return' # also 'enter'
- self.TAB = 'tab'
- self.ESC = 'escape' # also 'esc'
- self.BACKSPACE = 'backspace'
- self.DELETE = 'delete' # also 'del'
- self.INSERT = 'insert'
-
- self.CTRL = 'ctrl' # also 'lctrl'
- self.ALT = 'alt' # also 'lalt'
- self.SHIFT = 'shift' # also 'lshift'
+ self.ENTER = "return" # also 'enter'
+ self.TAB = "tab"
+ self.ESC = "escape" # also 'esc'
+ self.BACKSPACE = "backspace"
+ self.DELETE = "delete" # also 'del'
+ self.INSERT = "insert"
+
+ self.CTRL = "ctrl" # also 'lctrl'
+ self.ALT = "alt" # also 'lalt'
+ self.SHIFT = "shift" # also 'lshift'
# TODO: 'meta key' is not available
self.META = None
- self.RCTRL = 'ctrlright'
- self.RALT = 'altright'
- self.RSHIFT = 'shiftright'
+ self.RCTRL = "ctrlright"
+ self.RALT = "altright"
+ self.RSHIFT = "shiftright"
# TODO: 'meta key' is not available
self.RMETA = None
- self.F1 = 'f1'
- self.F2 = 'f2'
- self.F3 = 'f3'
- self.F4 = 'f4'
- self.F5 = 'f5'
- self.F6 = 'f6'
- self.F7 = 'f7'
- self.F8 = 'f8'
- self.F9 = 'f9'
- self.F10 = 'f10'
- self.F11 = 'f11'
- self.F12 = 'f12'
- self.F13 = 'f13'
- self.F14 = 'f14'
- self.F15 = 'f15'
- self.F16 = 'f16'
- self.F17 = 'f17'
- self.F18 = 'f18'
- self.F19 = 'f19'
- self.F20 = 'f20'
-
- self.HOME = 'home'
- self.END = 'end'
- self.LEFT = 'left'
- self.RIGHT = 'right'
- self.UP = 'up'
- self.DOWN = 'down'
- self.PAGE_DOWN = 'pgdn'
- self.PAGE_UP = 'pgup'
-
- self.CAPS_LOCK = 'capslock'
- self.PRINTSCREEN = 'printscreen'
- self.PAUSE = 'pause'
- self.SCROLL_LOCK = 'scrolllock'
- self.NUM_LOCK = 'numlock'
+ self.F1 = "f1"
+ self.F2 = "f2"
+ self.F3 = "f3"
+ self.F4 = "f4"
+ self.F5 = "f5"
+ self.F6 = "f6"
+ self.F7 = "f7"
+ self.F8 = "f8"
+ self.F9 = "f9"
+ self.F10 = "f10"
+ self.F11 = "f11"
+ self.F12 = "f12"
+ self.F13 = "f13"
+ self.F14 = "f14"
+ self.F15 = "f15"
+ self.F16 = "f16"
+ self.F17 = "f17"
+ self.F18 = "f18"
+ self.F19 = "f19"
+ self.F20 = "f20"
+
+ self.HOME = "home"
+ self.END = "end"
+ self.LEFT = "left"
+ self.RIGHT = "right"
+ self.UP = "up"
+ self.DOWN = "down"
+ self.PAGE_DOWN = "pgdn"
+ self.PAGE_UP = "pgup"
+
+ self.CAPS_LOCK = "capslock"
+ self.PRINTSCREEN = "printscreen"
+ self.PAUSE = "pause"
+ self.SCROLL_LOCK = "scrolllock"
+ self.NUM_LOCK = "numlock"
# TODO: these are not available
self.SYS_REQ = None
self.SUPER = None
@@ -554,20 +555,23 @@ def __init__(self) -> None:
def to_string(self, key: str) -> str:
"""
- Provide with a text representation of a desired modifier key
- according to the custom BC backend.
+ Provide a text representation of a desired modifier key according to the custom BC backend.
:param key: selected modifier name according to the current backend
:returns: text representation of the selected modifier
:raises: :py:class:`ValueError` if `key` is not found in the current modifier map
"""
if key is None:
- raise ValueError("The modifier key %s does not exist in the current modifier map" % key)
- return {self.MOD_NONE: "None",
- self.MOD_CTRL: "Ctrl",
- self.MOD_ALT: "Alt",
- self.MOD_SHIFT: "Shift",
- self.MOD_META: "Meta"}[key]
+ raise ValueError(
+ "The modifier key %s does not exist in the current modifier map" % key
+ )
+ return {
+ self.MOD_NONE: "None",
+ self.MOD_CTRL: "Ctrl",
+ self.MOD_ALT: "Alt",
+ self.MOD_SHIFT: "Shift",
+ self.MOD_META: "Meta",
+ }[key]
class AutoPyKeyModifier(KeyModifier):
@@ -596,10 +600,10 @@ def __init__(self) -> None:
# TODO: 'none' is not available
self.MOD_NONE = None
- self.MOD_CTRL = 'ctrl'
- self.MOD_ALT = 'alt'
- self.MOD_SHIFT = 'shift'
- self.MOD_META = 'meta'
+ self.MOD_CTRL = "ctrl"
+ self.MOD_ALT = "alt"
+ self.MOD_SHIFT = "shift"
+ self.MOD_META = "meta"
class VNCDoToolKeyModifier(KeyModifier):
@@ -611,10 +615,10 @@ def __init__(self) -> None:
# TODO: 'none' is not available
self.MOD_NONE = None
- self.MOD_CTRL = 'ctrl'
- self.MOD_ALT = 'alt'
- self.MOD_SHIFT = 'shift'
- self.MOD_META = 'meta'
+ self.MOD_CTRL = "ctrl"
+ self.MOD_ALT = "alt"
+ self.MOD_SHIFT = "shift"
+ self.MOD_META = "meta"
class PyAutoGUIKeyModifier(KeyModifier):
@@ -626,9 +630,9 @@ def __init__(self) -> None:
# TODO: 'none' is not available
self.MOD_NONE = None
- self.MOD_CTRL = 'ctrl'
- self.MOD_ALT = 'alt'
- self.MOD_SHIFT = 'shift'
+ self.MOD_CTRL = "ctrl"
+ self.MOD_ALT = "alt"
+ self.MOD_SHIFT = "shift"
# TODO: 'meta' is not available
self.MOD_META = None
@@ -644,8 +648,7 @@ def __init__(self) -> None:
def to_string(self, key: str) -> str:
"""
- Provide with a text representation of a desired mouse button
- according to the custom BC backend.
+ Provide a text representation of a desired mouse button according to the custom BC backend.
:param key: selected mouse button according to the current backend
:returns: text representation of the selected mouse button
@@ -653,9 +656,11 @@ def to_string(self, key: str) -> str:
"""
if key is None:
raise ValueError("The key %s does not exist in the current mouse map" % key)
- return {self.LEFT_BUTTON: "MouseLeft",
- self.RIGHT_BUTTON: "MouseRight",
- self.CENTER_BUTTON: "MouseCenter"}[key]
+ return {
+ self.LEFT_BUTTON: "MouseLeft",
+ self.RIGHT_BUTTON: "MouseRight",
+ self.CENTER_BUTTON: "MouseCenter",
+ }[key]
class AutoPyMouseButton(MouseButton):
@@ -703,6 +708,6 @@ def __init__(self) -> None:
"""Build an instance containing the mouse button map for the PyAutoGUI backend."""
super().__init__()
- self.LEFT_BUTTON = 'left'
- self.RIGHT_BUTTON = 'right'
- self.CENTER_BUTTON = 'middle'
+ self.LEFT_BUTTON = "left"
+ self.RIGHT_BUTTON = "right"
+ self.CENTER_BUTTON = "middle"
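
A hedged sketch of the two-level key mapping above; the class name XDoToolKey is assumed from the xdotool docstring in this file.

from guibot.inputmap import XDoToolKey

keymap = XDoToolKey()
print(keymap.ENTER)                    # backend-specific value, "Return" per the map above
print(keymap.to_string(keymap.ENTER))  # backend-agnostic text form, "Enter"
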
diff --git a/guibot/location.py b/guibot/location.py
index d6668e4b..bdf92443 100644
--- a/guibot/location.py
+++ b/guibot/location.py
@@ -14,15 +14,14 @@
# along with guibot. If not, see .
"""
+Simple class to hold screen location data.
SUMMARY
------------------------------------------------------
-Simple class to hold screen location data.
..note:: Unless this class becomes more useful for the extra OOP abstraction
it might get deprecated in favor of a simple (x, y) tuple.
-
INTERFACE
------------------------------------------------------
@@ -53,6 +52,7 @@ def get_x(self) -> int:
:returns: x coordinate of the location
"""
return self._xpos
+
x = property(fget=get_x)
def get_y(self) -> int:
@@ -62,4 +62,5 @@ def get_y(self) -> int:
:returns: y coordinate of the location
"""
return self._ypos
+
y = property(fget=get_y)
diff --git a/guibot/match.py b/guibot/match.py
index 26fc930f..8050035b 100644
--- a/guibot/match.py
+++ b/guibot/match.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Class and functionality related to target matches on screen.
SUMMARY
------------------------------------------------------
-Class and functionality related to target matches on screen.
INTERFACE
@@ -32,14 +32,20 @@
class Match(Region):
- """
- Wrapper around image which adds data necessary for manipulation
- of matches on a screen.
- """
-
- def __init__(self, xpos: int, ypos: int, width: int, height: int,
- dx: int = 0, dy: int = 0, similarity: float = 0.0,
- dc: Controller = None, cv: "Finder" = None) -> None:
+ """Wrapper around region which adds data necessary for manipulation of matches on a screen."""
+
+ def __init__(
+ self,
+ xpos: int,
+ ypos: int,
+ width: int,
+ height: int,
+ dx: int = 0,
+ dy: int = 0,
+ similarity: float = 0.0,
+ dc: Controller = None,
+ cv: "Finder" = None,
+ ) -> None:
"""
Build a match object.
@@ -73,6 +79,7 @@ def set_x(self, value: int) -> None:
:param value: x coordinate of the upleft vertex of the region
"""
self._xpos = value
+
x = property(fget=Region.get_x, fset=set_x)
def set_y(self, value: int) -> None:
@@ -84,6 +91,7 @@ def set_y(self, value: int) -> None:
:param value: y coordinate of the upleft vertex of the region
"""
self._ypos = value
+
y = property(fget=Region.get_y, fset=set_y)
def get_dx(self) -> int:
@@ -93,6 +101,7 @@ def get_dx(self) -> int:
:returns: x offset from the center of the match region
"""
return self._dx
+
dx = property(fget=get_dx)
def get_dy(self) -> int:
@@ -102,6 +111,7 @@ def get_dy(self) -> int:
:returns: y offset from the center of the match region
"""
return self._dy
+
dy = property(fget=get_dy)
def get_similarity(self) -> float:
@@ -111,6 +121,7 @@ def get_similarity(self) -> float:
:returns: similarity the match was obtained with
"""
return self._similarity
+
similarity = property(fget=get_similarity)
def get_target(self) -> Location:
@@ -119,13 +130,19 @@ def get_target(self) -> Location:
:returns: target location to click on if clicking on the match
"""
- return self.calc_click_point(self._xpos, self._ypos,
- self._width, self._height,
- Location(self._dx, self._dy))
+ return self.calc_click_point(
+ self._xpos,
+ self._ypos,
+ self._width,
+ self._height,
+ Location(self._dx, self._dy),
+ )
+
target = property(fget=get_target)
- def calc_click_point(self, xpos: int, ypos: int, width: int, height: int,
- offset: Location) -> Location:
+ def calc_click_point(
+ self, xpos: int, ypos: int, width: int, height: int, offset: Location
+ ) -> Location:
"""
Calculate target location to click on if clicking on the match.
@@ -136,8 +153,9 @@ def calc_click_point(self, xpos: int, ypos: int, width: int, height: int,
:param offset: offset from the match region center for the final target
:returns: target location to click on if clicking on the match
"""
- center_region = Region(0, 0, width, height,
- dc=self.dc_backend, cv=self.cv_backend)
+ center_region = Region(
+ 0, 0, width, height, dc=self.dc_backend, cv=self.cv_backend
+ )
click_center = center_region.center
target_xpos = xpos + click_center.x + offset.x
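
The click point computation above reduces to the following arithmetic (illustrative numbers):

xpos, ypos, width, height = 100, 200, 40, 20  # match region
dx, dy = 5, -3                                # stored offset from the region center
click_x = xpos + width // 2 + dx              # 100 + 20 + 5 = 125
click_y = ypos + height // 2 + dy             # 200 + 10 - 3 = 207
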
diff --git a/guibot/path.py b/guibot/path.py
index 4e398d6a..33dee520 100644
--- a/guibot/path.py
+++ b/guibot/path.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Old module for path resolution - to be deprecated.
SUMMARY
------------------------------------------------------
-Old module for path resolution - to be deprecated.
INTERFACE
@@ -32,8 +32,9 @@
from .fileresolver import FileResolver as Path
-logging.getLogger("guibot.path")\
- .warn("The `path` module is deprecated, use `fileresolver` instead.")
+logging.getLogger("guibot.path").warn(
+ "The `path` module is deprecated, use `fileresolver` instead."
+)
__all__ = ["Path"]
diff --git a/guibot/region.py b/guibot/region.py
index 44b6f323..9b49f57a 100644
--- a/guibot/region.py
+++ b/guibot/region.py
@@ -14,17 +14,16 @@
# along with guibot. If not, see .
"""
+Secondary (and more advanced) interface for generic screen regions.
SUMMARY
------------------------------------------------------
-Secondary (and more advanced) interface for generic screen regions.
The main guibot interface is just a specialized region where we could match
and work with subregions. Any region instance can also be a complete screen,
hence the increased generality of using this as an interface and calling it
directly.
-
INTERFACE
------------------------------------------------------
@@ -32,6 +31,7 @@
import time
import os
+import logging
# interconnected classes - carefully avoid circular reference
from .config import GlobalConfig
@@ -42,18 +42,26 @@
from .finder import *
from .controller import *
-import logging
-log = logging.getLogger('guibot.region')
+
+log = logging.getLogger("guibot.region")
class Region(object):
"""
- Region of the screen supporting vertex and nearby region selection,
- validation of expected images, and mouse and keyboard control.
+ Region of the screen at a given position and with a given size.
+
+ It supports vertex and nearby region selection, validation of expected images, and mouse and keyboard control.
"""
- def __init__(self, xpos: int = 0, ypos: int = 0, width: int = 0, height: int = 0,
- dc: Controller = None, cv: "Finder" = None) -> None:
+ def __init__(
+ self,
+ xpos: int = 0,
+ ypos: int = 0,
+ width: int = 0,
+ height: int = 0,
+ dc: Controller = None,
+ cv: "Finder" = None,
+ ) -> None:
"""
Build a region object from upleft to downright vertex coordinates.
@@ -123,17 +131,17 @@ def __init__(self, xpos: int = 0, ypos: int = 0, width: int = 0, height: int = 0
mouse_map = self.dc_backend.mousemap
for mouse_button in dir(mouse_map):
- if mouse_button.endswith('_BUTTON'):
+ if mouse_button.endswith("_BUTTON"):
setattr(self, mouse_button, getattr(mouse_map, mouse_button))
key_map = self.dc_backend.keymap
for key in dir(key_map):
- if not key.startswith('__') and key != "to_string":
+ if not key.startswith("__") and key != "to_string":
setattr(self, key, getattr(key_map, key))
mod_map = self.dc_backend.modmap
for modifier_key in dir(mod_map):
- if modifier_key.startswith('MOD_'):
+ if modifier_key.startswith("MOD_"):
setattr(self, modifier_key, getattr(mod_map, modifier_key))
def _ensure_screen_clipping(self) -> None:
@@ -165,6 +173,7 @@ def get_x(self) -> int:
:returns: x coordinate of the upleft vertex of the region
"""
return self._xpos
+
x = property(fget=get_x)
def get_y(self) -> int:
@@ -174,6 +183,7 @@ def get_y(self) -> int:
:returns: y coordinate of the upleft vertex of the region
"""
return self._ypos
+
y = property(fget=get_y)
def get_width(self) -> int:
@@ -183,6 +193,7 @@ def get_width(self) -> int:
:returns: width of the region (xpos+width for downright vertex x)
"""
return self._width
+
width = property(fget=get_width)
def get_height(self) -> int:
@@ -192,6 +203,7 @@ def get_height(self) -> int:
:returns: height of the region (ypos+height for downright vertex y)
"""
return self._height
+
height = property(fget=get_height)
def get_center(self) -> Location:
@@ -204,6 +216,7 @@ def get_center(self) -> Location:
ypos = self._ypos + int(self._height / 2)
return Location(xpos, ypos)
+
center = property(fget=get_center)
def get_top_left(self) -> Location:
@@ -213,6 +226,7 @@ def get_top_left(self) -> Location:
:returns: upleft vertex of the region
"""
return Location(self._xpos, self._ypos)
+
top_left = property(fget=get_top_left)
def get_top_right(self) -> Location:
@@ -222,6 +236,7 @@ def get_top_right(self) -> Location:
:returns: upright vertex of the region
"""
return Location(self._xpos + self._width, self._ypos)
+
top_right = property(fget=get_top_right)
def get_bottom_left(self) -> Location:
@@ -231,6 +246,7 @@ def get_bottom_left(self) -> Location:
:returns: downleft vertex of the region
"""
return Location(self._xpos, self._ypos + self._height)
+
bottom_left = property(fget=get_bottom_left)
def get_bottom_right(self) -> Location:
@@ -240,6 +256,7 @@ def get_bottom_right(self) -> Location:
:returns: downright vertex of the region
"""
return Location(self._xpos + self._width, self._ypos + self._height)
+
bottom_right = property(fget=get_bottom_right)
def is_empty(self) -> bool:
@@ -249,6 +266,7 @@ def is_empty(self) -> bool:
:returns: whether the region is empty, i.e. has zero size
"""
return self._width == 0 and self._height == 0
+
is_empty = property(fget=is_empty)
def get_last_match(self) -> "Match":
@@ -258,6 +276,7 @@ def get_last_match(self) -> "Match":
:returns: last match obtained from finding a target within the region
"""
return self._last_match
+
last_match = property(fget=get_last_match)
def get_mouse_location(self) -> Location:
@@ -267,14 +286,14 @@ def get_mouse_location(self) -> Location:
:returns: mouse location
"""
return self.dc_backend.mouse_location
+
mouse_location = property(fget=get_mouse_location)
"""Main region methods"""
def nearby(self, rrange: int = 50) -> "Region":
"""
- Obtain a region containing the previous one but enlarged
- by a number of pixels on each side.
+ Obtain a region containing the previous one but enlarged by a number of pixels on each side.
:param rrange: number of pixels to add
:returns: new region enlarged by `rrange` on all sides
@@ -292,13 +311,13 @@ def nearby(self, rrange: int = 50) -> "Region":
new_height = self._height + rrange + self._ypos - new_ypos
# Final clipping is done in the Region constructor
- return Region(new_xpos, new_ypos, new_width, new_height,
- self.dc_backend, self.cv_backend)
+ return Region(
+ new_xpos, new_ypos, new_width, new_height, self.dc_backend, self.cv_backend
+ )
def above(self, rrange: int = 0) -> "Region":
"""
- Obtain a region containing the previous one but enlarged
- by a number of pixels on the upper side.
+ Obtain an enlarged region by a number of pixels on the upper side.
:param rrange: number of pixels to add
:returns: new region enlarged by `rrange` on upper side
@@ -315,13 +334,18 @@ def above(self, rrange: int = 0) -> "Region":
new_height = self._height + self._ypos - new_ypos
# Final clipping is done in the Region constructor
- return Region(self._xpos, new_ypos, self._width, new_height,
- self.dc_backend, self.cv_backend)
+ return Region(
+ self._xpos,
+ new_ypos,
+ self._width,
+ new_height,
+ self.dc_backend,
+ self.cv_backend,
+ )
def below(self, rrange: int = 0) -> "Region":
"""
- Obtain a region containing the previous one but enlarged
- by a number of pixels on the lower side.
+ Obtain an enlarged region by a number of pixels on the lower side.
:param rrange: number of pixels to add
:returns: new region enlarged by `rrange` on lower side
@@ -333,13 +357,18 @@ def below(self, rrange: int = 0) -> "Region":
new_height = self._height + rrange
# Final clipping is done in the Region constructor
- return Region(self._xpos, self._ypos, self._width, new_height,
- self.dc_backend, self.cv_backend)
+ return Region(
+ self._xpos,
+ self._ypos,
+ self._width,
+ new_height,
+ self.dc_backend,
+ self.cv_backend,
+ )
def left(self, rrange: int = 0) -> "Region":
"""
- Obtain a region containing the previous one but enlarged
- by a number of pixels on the left side.
+ Obtain an enlarged region by a number of pixels on the left side.
:param rrange: number of pixels to add
:returns: new region enlarged by `rrange` on left side
@@ -356,13 +385,18 @@ def left(self, rrange: int = 0) -> "Region":
new_width = self._width + self._xpos - new_xpos
# Final clipping is done in the Region constructor
- return Region(new_xpos, self._ypos, new_width, self._height,
- self.dc_backend, self.cv_backend)
+ return Region(
+ new_xpos,
+ self._ypos,
+ new_width,
+ self._height,
+ self.dc_backend,
+ self.cv_backend,
+ )
def right(self, rrange: int = 0) -> "Region":
"""
- Obtain a region containing the previous one but enlarged
- by a number of pixels on the right side.
+ Obtain an enlarged region by a number of pixels on the right side.
:param rrange: number of pixels to add
:returns: new region enlarged by `rrange` on right side
@@ -374,8 +408,14 @@ def right(self, rrange: int = 0) -> "Region":
new_width = self._width + rrange
# Final clipping is done in the Region constructor
- return Region(self._xpos, self._ypos, new_width, self._height,
- self.dc_backend, self.cv_backend)
+ return Region(
+ self._xpos,
+ self._ypos,
+ new_width,
+ self._height,
+ self.dc_backend,
+ self.cv_backend,
+ )
"""Image expect methods"""
@@ -393,8 +433,9 @@ def find(self, target: str | Target, timeout: int = 10) -> "Match":
matches = self.find_all(target, timeout=timeout, allow_zero=False)
return matches[0]
- def find_all(self, target: str | Target, timeout: int = 10,
- allow_zero: bool = False) -> "list[Match]":
+ def find_all(
+ self, target: str | Target, timeout: int = 10, allow_zero: bool = False
+ ) -> "list[Match]":
"""
Find multiples of a target on the screen.
@@ -423,13 +464,25 @@ def find_all(self, target: str | Target, timeout: int = 10,
relative_matches = cv_backend.find(target, screen_capture)
if len(relative_matches) > 0:
from .match import Match
+
for i, match in enumerate(relative_matches):
absolute_x, absolute_y = match.x + self.x, match.y + self.y
- new_match = Match(absolute_x, absolute_y,
- match.width, match.height, match.dx, match.dy,
- match.similarity, dc=dc_backend, cv=cv_backend)
+ new_match = Match(
+ absolute_x,
+ absolute_y,
+ match.width,
+ match.height,
+ match.dx,
+ match.dy,
+ match.similarity,
+ dc=dc_backend,
+ cv=cv_backend,
+ )
if len(last_matches) > i:
- if last_matches[i].x == absolute_x and last_matches[i].y == absolute_y:
+ if (
+ last_matches[i].x == absolute_x
+ and last_matches[i].y == absolute_y
+ ):
moving_targets = False
last_matches[i] = new_match
else:
@@ -437,19 +490,23 @@ def find_all(self, target: str | Target, timeout: int = 10,
moving_targets = True
last_matches.append(new_match)
self._last_match = last_matches[-1]
- if not GlobalConfig.wait_for_animations == True or not moving_targets:
+ if GlobalConfig.wait_for_animations is not True or not moving_targets:
return last_matches
elif time.time() > timeout_limit:
if allow_zero:
return last_matches
else:
- if GlobalConfig.save_needle_on_error == True:
+ if GlobalConfig.save_needle_on_error is True:
if not os.path.exists(ImageLogger.logging_destination):
os.mkdir(ImageLogger.logging_destination)
dump_path = GlobalConfig.image_logging_destination
- hdump_path = os.path.join(dump_path, "last_finderror_haystack.png")
- ndump_path = os.path.join(dump_path, "last_finderror_needle.png")
+ hdump_path = os.path.join(
+ dump_path, "last_finderror_haystack.png"
+ )
+ ndump_path = os.path.join(
+ dump_path, "last_finderror_needle.png"
+ )
screen_capture.save(hdump_path)
target.save(ndump_path)
raise FindError(target)
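
A hedged sketch of the lookup flow above; "button.png" and the "images" directory are hypothetical.

from guibot.guibot import GuiBot
from guibot.errors import FindError

bot = GuiBot()
bot.add_path("images")
try:
    matches = bot.find_all("button.png", timeout=10, allow_zero=False)
    print(len(matches), "matches; first at", matches[0].x, matches[0].y)
except FindError:
    # with GlobalConfig.save_needle_on_error set, the failing needle and haystack are dumped
    pass
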
@@ -479,18 +536,23 @@ def _determine_cv_backend(self, target: Target) -> "Match":
return target.match_settings
if isinstance(target, Text) and not isinstance(self.cv_backend, TextFinder):
raise IncompatibleTargetError("Need text matcher for matching text")
- if isinstance(target, Pattern) and not (isinstance(self.cv_backend, CascadeFinder)
- or isinstance(self.cv_backend, DeepFinder)):
+ if isinstance(target, Pattern) and not (
+ isinstance(self.cv_backend, CascadeFinder)
+ or isinstance(self.cv_backend, DeepFinder)
+ ):
raise IncompatibleTargetError("Need pattern matcher for matching patterns")
if isinstance(target, Chain) and not isinstance(self.cv_backend, HybridFinder):
- raise IncompatibleTargetError("Need hybrid matcher for matching chain targets")
+ raise IncompatibleTargetError(
+ "Need hybrid matcher for matching chain targets"
+ )
target.match_settings = self.cv_backend
return self.cv_backend
def sample(self, target: str | Target) -> float:
"""
- Sample the similarity between a target and the screen,
- i.e. an empirical probability that the target is on the screen.
+ Sample the similarity between a target and the screen.
+
+ Similarity here means an empirical probability that the target is on the screen.
:param target: target to look for
:returns: similarity with best match on the screen
@@ -511,8 +573,7 @@ def sample(self, target: str | Target) -> float:
def exists(self, target: str | Target, timeout: int = 0) -> "Match | None":
"""
- Check if a target exists on the screen using the matching
- success as a threshold for the existence.
+ Check if a target exists on the screen using similarity as a threshold.
:param target: target to look for
:param timeout: timeout before giving up
@@ -528,8 +589,7 @@ def exists(self, target: str | Target, timeout: int = 0) -> "Match | None":
def wait(self, target: str | Target, timeout: int = 30) -> "Match":
"""
- Wait for a target to appear (be matched) with a given timeout
- as failing tolerance.
+ Wait for a target to appear (be matched) with a given timeout as failing tolerance.
:param target: target to look for
:param timeout: timeout before giving up
@@ -541,8 +601,7 @@ def wait(self, target: str | Target, timeout: int = 30) -> "Match":
def wait_vanish(self, target: str | Target, timeout: int = 30) -> "Region":
"""
- Wait for a target to disappear (be unmatched, i.e. matched
- without success) with a given timeout as failing tolerance.
+ Wait for a target to disappear (be unmatched) with a given timeout as failing tolerance.
:param target: target to look for
:param timeout: timeout before giving up
@@ -581,7 +640,9 @@ def idle(self, timeout: int) -> "Region":
"""Mouse methods"""
- def hover(self, target_or_location: "Match | Location | str | Target") -> "Match | None":
+ def hover(
+ self, target_or_location: "Match | Location | str | Target"
+ ) -> "Match | None":
"""
Hover the mouse over a target or location.
@@ -593,6 +654,7 @@ def hover(self, target_or_location: "Match | Location | str | Target") -> "Match
# Handle Match
from .match import Match
+
if isinstance(target_or_location, Match):
self.dc_backend.mouse_move(target_or_location.target, smooth)
return None
@@ -608,11 +670,15 @@ def hover(self, target_or_location: "Match | Location | str | Target") -> "Match
return match
- def click(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match | None":
+ def click(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
- Click on a target or location using the left mouse button and
- optionally holding special keys.
+ Click on a target or location using the left mouse button.
+
+ Optionally we can hold some special keys.
:param target_or_location: target or location to click on
:param modifiers: special keys to hold during clicking
@@ -630,11 +696,15 @@ def click(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_click(self.LEFT_BUTTON, 1, modifiers)
return match
- def right_click(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match | None":
+ def right_click(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
- Click on a target or location using the right mouse button and
- optionally holding special keys.
+ Click on a target or location using the right mouse button.
+
+ Optionally we can hold some special keys.
Arguments and return values are analogical to :py:func:`Region.click`.
"""
@@ -645,11 +715,15 @@ def right_click(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_click(self.RIGHT_BUTTON, 1, modifiers)
return match
- def middle_click(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match | None":
+ def middle_click(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
- Click on a target or location using the middle mouse button and
- optionally holding special keys.
+ Click on a target or location using the middle mouse button.
+
+ Optionally we can hold some special keys.
Arguments and return values are analogical to :py:func:`Region.click`.
"""
@@ -660,11 +734,13 @@ def middle_click(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_click(self.CENTER_BUTTON, 1, modifiers)
return match
- def double_click(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match | None":
+ def double_click(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
- Double click on a target or location using the left mouse button
- and optionally holding special keys.
+ Double click on a target or location using the left mouse button.
+
+ Optionally we can hold some special keys.
Arguments and return values are analogical to :py:func:`Region.click`.
"""
@@ -675,11 +751,16 @@ def double_click(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_click(self.LEFT_BUTTON, 2, modifiers)
return match
- def multi_click(self, target_or_location: "Match | Location | str | Target",
- count: int = 3, modifiers: list[str] = None) -> "Match | None":
+ def multi_click(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ count: int = 3,
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
- Click N times on a target or location using the left mouse button
- and optionally holding special keys.
+ Click N times on a target or location using the left mouse button.
+
+ Optionally we can hold some special keys.
Arguments and return values are analogical to :py:func:`Region.click`.
"""
@@ -690,9 +771,14 @@ def multi_click(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_click(self.LEFT_BUTTON, count, modifiers)
return match
- def click_expect(self, click_image_or_location: Image | Location,
- expect_target: str | Target, modifiers: list[str] = None,
- timeout: int = 60, retries: int = 3) -> "Match | Region":
+ def click_expect(
+ self,
+ click_image_or_location: Image | Location,
+ expect_target: str | Target,
+ modifiers: list[str] = None,
+ timeout: int = 60,
+ retries: int = 3,
+ ) -> "Match | Region":
"""
Click on an image or location and wait for another one to appear.
@@ -705,7 +791,7 @@ def click_expect(self, click_image_or_location: Image | Location,
"""
for i in range(retries):
if i > 0:
- log.info("Retrying the mouse click (%s of %s)", i+1, retries)
+ log.info("Retrying the mouse click (%s of %s)", i + 1, retries)
self.click(click_image_or_location, modifiers=modifiers)
try:
return self.wait(expect_target, timeout)
@@ -715,9 +801,14 @@ def click_expect(self, click_image_or_location: Image | Location,
raise error
return self
- def click_vanish(self, click_image_or_location: Image | Location,
- expect_target: str | Target, modifiers: list[str] = None,
- timeout: int = 60, retries: int = 3) -> "Region":
+ def click_vanish(
+ self,
+ click_image_or_location: Image | Location,
+ expect_target: str | Target,
+ modifiers: list[str] = None,
+ timeout: int = 60,
+ retries: int = 3,
+ ) -> "Region":
"""
Click on an image or location and wait for another one to disappear.
@@ -730,7 +821,7 @@ def click_vanish(self, click_image_or_location: Image | Location,
"""
for i in range(retries):
if i > 0:
- log.info("Retrying the mouse click (%s of %s)", i+1, retries)
+ log.info("Retrying the mouse click (%s of %s)", i + 1, retries)
self.click(click_image_or_location, modifiers=modifiers)
try:
return self.wait_vanish(expect_target, timeout)
@@ -740,18 +831,22 @@ def click_vanish(self, click_image_or_location: Image | Location,
raise error
return self
- def click_at_index(self, anchor: str | Target, index: int = 0,
- find_number: int = 3, timeout: int = 10) -> "Match":
+ def click_at_index(
+ self,
+ anchor: str | Target,
+ index: int = 0,
+ find_number: int = 3,
+ timeout: int = 10,
+ ) -> "Match":
"""
- Find all instances of an anchor image and click on the one with the
- desired index given that they are horizontally then vertically sorted.
+ Find and click on a specific instance of an anchor image, selected by index after sorting matches horizontally then vertically.
:param anchor: image to find all matches of
:param index: index of the match to click on (assuming >=1 matches),
- sorted according to their (x,y) coordinates
+ sorted according to their (x,y) coordinates
:param find_number: expected number of matches which is necessary
- for fast failure in case some elements are not visualized and/or
- proper matching result
+ for fast failure in case some elements are not visualized and/or
+ proper matching result
:param timeout: timeout before which the number of matches should be found
:returns: match from finding the target of the desired index
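To illustrate the indexing semantics documented above, a short sketch (the `checkbox` image name is hypothetical and `guibot` is a configured region instance as in the earlier sketch):

# click the second of three identical-looking checkboxes, where all matches are
# sorted by their (x, y) coordinates before indexing
match = guibot.click_at_index("checkbox", index=1, find_number=3, timeout=10)
print(match.x, match.y)  # coordinates of the clicked match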
@@ -784,13 +879,17 @@ def click_at_index(self, anchor: str | Target, index: int = 0,
self.find(anchor)
sorted_targets = sorted(targets, key=lambda x: (x.x, x.y))
- logging.debug("Totally %s clicking matches found: %s", len(sorted_targets),
- ["(%s, %s)" % (x.x, x.y) for x in sorted_targets])
+ logging.debug(
+ "Totally %s clicking matches found: %s",
+ len(sorted_targets),
+ ["(%s, %s)" % (x.x, x.y) for x in sorted_targets],
+ )
self.click(sorted_targets[index])
return sorted_targets[index]
- def mouse_down(self, target_or_location: "Match | Location | str | Target",
- button: int = None) -> "Match | None":
+ def mouse_down(
+ self, target_or_location: "Match | Location | str | Target", button: int = None
+ ) -> "Match | None":
"""
Hold down an arbitrary mouse button on a target or location.
@@ -806,8 +905,9 @@ def mouse_down(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_down(button)
return match
- def mouse_up(self, target_or_location: "Match | Location | str | Target",
- button: int = None) -> "Match | None":
+ def mouse_up(
+ self, target_or_location: "Match | Location | str | Target", button: int = None
+ ) -> "Match | None":
"""
Release an arbitrary mouse button on a target or location.
@@ -823,8 +923,12 @@ def mouse_up(self, target_or_location: "Match | Location | str | Target",
self.dc_backend.mouse_up(button)
return match
- def mouse_scroll(self, target_or_location: "Match | Location | str | Target",
- clicks: int = 10, horizontal: bool = False) -> "Match | None":
+ def mouse_scroll(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ clicks: int = 10,
+ horizontal: bool = False,
+ ) -> "Match | None":
"""
Scroll the mouse for a number of clicks.
@@ -835,15 +939,21 @@ def mouse_scroll(self, target_or_location: "Match | Location | str | Target",
:returns: match from finding the target or nothing if scrolling on a known location
"""
match = self.hover(target_or_location)
- log.debug("Scrolling the mouse %s for %s clicks at %s",
- "horizontally" if horizontal else "vertically",
- clicks, target_or_location)
+ log.debug(
+ "Scrolling the mouse %s for %s clicks at %s",
+ "horizontally" if horizontal else "vertically",
+ clicks,
+ target_or_location,
+ )
self.dc_backend.mouse_scroll(clicks, horizontal)
return match
- def drag_drop(self, src_target_or_location: "Match | Location | str | Target",
- dst_target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match | None":
+ def drag_drop(
+ self,
+ src_target_or_location: "Match | Location | str | Target",
+ dst_target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match | None":
"""
Drag from and drop at a target or location optionally holding special keys.
@@ -857,8 +967,11 @@ def drag_drop(self, src_target_or_location: "Match | Location | str | Target",
match = self.drop_at(dst_target_or_location, modifiers)
return match
- def drag_from(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match":
+ def drag_from(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match":
"""
Drag from a target or location optionally holding special keys.
@@ -871,7 +984,7 @@ def drag_from(self, target_or_location: "Match | Location | str | Target",
if modifiers is not None:
log.info("Holding the modifiers %s", " ".join(modifiers))
self.dc_backend.keys_toggle(modifiers, True)
- #self.dc_backend.keys_toggle(["Ctrl"], True)
+ # self.dc_backend.keys_toggle(["Ctrl"], True)
log.info("Dragging %s", target_or_location)
self.dc_backend.mouse_down(self.LEFT_BUTTON)
@@ -879,8 +992,11 @@ def drag_from(self, target_or_location: "Match | Location | str | Target",
return match
- def drop_at(self, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match":
+ def drop_at(
+ self,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match":
"""
Drop at a target or location optionally holding special keys.
@@ -921,11 +1037,13 @@ def press_keys(self, keys: str | list[str]) -> "Region":
self.dc_backend.keys_press(keys_list)
return self
- def press_at(self, keys: str | list[str],
- target_or_location: "Match | Location | str | Target") -> "Match":
+ def press_at(
+ self,
+ keys: str | list[str],
+ target_or_location: "Match | Location | str | Target",
+ ) -> "Match":
"""
- Press a single key or a list of keys simultaneously
- at a specified target or location.
+ Press a single key or a list of keys simultaneously at a specified target or location.
This method is similar to :py:func:`Region.press_keys` but
with an extra argument like :py:func:`Region.click`.
@@ -936,8 +1054,11 @@ def press_at(self, keys: str | list[str],
self.dc_backend.keys_press(keys_list)
return match
- def _parse_keys(self, keys: str | list[str],
- target_or_location: "Match | Location | str | Target" = None) -> list[str]:
+ def _parse_keys(
+ self,
+ keys: str | list[str],
+ target_or_location: "Match | Location | str | Target" = None,
+ ) -> list[str]:
at_str = " at %s" % target_or_location if target_or_location else ""
keys_list = []
@@ -953,14 +1074,18 @@ def _parse_keys(self, keys: str | list[str],
raise # a key cannot be a string (text)
key_strings.append(key)
keys_list.append(key)
- log.info("Pressing together keys '%s'%s",
- "'+'".join(keystr for keystr in key_strings),
- at_str)
+ log.info(
+ "Pressing together keys '%s'%s",
+ "'+'".join(keystr for keystr in key_strings),
+ at_str,
+ )
else:
# if not a list (i.e. if a single key)
key = keys
try:
- log.info("Pressing key '%s'%s", self.dc_backend.keymap.to_string(key), at_str)
+ log.info(
+ "Pressing key '%s'%s", self.dc_backend.keymap.to_string(key), at_str
+ )
# if not a special key (i.e. if a character key)
except KeyError:
if isinstance(key, int):
@@ -971,8 +1096,13 @@ def _parse_keys(self, keys: str | list[str],
keys_list.append(key)
return keys_list
- def press_expect(self, keys: list[str] | str, expect_target: str | Target,
- timeout: int = 60, retries: int = 3) -> "Match":
+ def press_expect(
+ self,
+ keys: list[str] | str,
+ expect_target: str | Target,
+ timeout: int = 60,
+ retries: int = 3,
+ ) -> "Match":
"""
Press a key and wait for a target to appear.
@@ -985,16 +1115,22 @@ def press_expect(self, keys: list[str] | str, expect_target: str | Target,
"""
for i in range(retries):
if i > 0:
- log.info("Retrying the key press (%s of %s)", i+1, retries)
+ log.info("Retrying the key press (%s of %s)", i + 1, retries)
self.press_keys(keys)
try:
return self.wait(expect_target, timeout)
except FindError as error:
if i == retries - 1:
raise error
+ return None
- def press_vanish(self, keys: list[str] | str, expect_target: str | Target,
- timeout: int = 60, retries: int = 3) -> "Region":
+ def press_vanish(
+ self,
+ keys: list[str] | str,
+ expect_target: str | Target,
+ timeout: int = 60,
+ retries: int = 3,
+ ) -> "Region":
"""
Press a key and wait for a target to disappear.
@@ -1007,7 +1143,7 @@ def press_vanish(self, keys: list[str] | str, expect_target: str | Target,
"""
for i in range(retries):
if i > 0:
- log.info("Retrying the key press (%s of %s)", i+1, retries)
+ log.info("Retrying the key press (%s of %s)", i + 1, retries)
self.press_keys(keys)
try:
return self.wait_vanish(expect_target, timeout)
@@ -1044,11 +1180,16 @@ def type_text(self, text: list[str] | str, modifiers: list[str] = None) -> "Regi
self.dc_backend.keys_type(text_list, modifiers)
return self
- def type_at(self, text: list[str] | str, target_or_location: "Match | Location | str | Target",
- modifiers: list[str] = None) -> "Match":
+ def type_at(
+ self,
+ text: list[str] | str,
+ target_or_location: "Match | Location | str | Target",
+ modifiers: list[str] = None,
+ ) -> "Match":
"""
- Type a list of consecutive character keys (without special keys)
- at a specified target or location.
+ Type a list of consecutive keys at a specified target or location.
+
+ These are meant to be characters and not special keys.
This method is similar to :py:func:`Region.type_text` but
with an extra argument like :py:func:`Region.click`.
@@ -1065,8 +1206,11 @@ def type_at(self, text: list[str] | str, target_or_location: "Match | Location |
self.dc_backend.keys_type(text_list, modifiers)
return match
- def _parse_text(self, text: list[str] | str,
- target_or_location: "Match | Location | str | Target" = None) -> list[str]:
+ def _parse_text(
+ self,
+ text: list[str] | str,
+ target_or_location: "Match | Location | str | Target" = None,
+ ) -> list[str]:
at_str = " at %s" % target_or_location if target_or_location else ""
text_list = []
@@ -1086,8 +1230,14 @@ def _parse_text(self, text: list[str] | str,
return text_list
"""Mixed (form) methods"""
- def click_at(self, anchor: "Match | Location | Target | str",
- dx: int, dy: int, count: int = 1) -> "Region":
+
+ def click_at(
+ self,
+ anchor: "Match | Location | Target | str",
+ dx: int,
+ dy: int,
+ count: int = 1,
+ ) -> "Region":
"""
Clicks on a relative location using a displacement from an anchor.
@@ -1099,6 +1249,7 @@ def click_at(self, anchor: "Match | Location | Target | str",
:raises: :py:class:`exceptions.ValueError` if `count` is not acceptable value
"""
from .match import Match
+
if isinstance(anchor, Match):
start_loc = anchor.target
elif isinstance(anchor, Location):
@@ -1111,11 +1262,18 @@ def click_at(self, anchor: "Match | Location | Target | str",
return self
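For the displacement-based form methods starting here, a brief sketch of `click_at` and `fill_at` under the same assumptions as the earlier sketches (the anchor image name and offsets are made up):

# click 40 pixels to the right of and 5 pixels below the anchor image,
# e.g. to hit an unlabeled input field next to its label
guibot.click_at("username_label", dx=40, dy=5)

# fill_at uses the same displacement to clear and type into that field
guibot.fill_at("username_label", "my-user", dx=40, dy=5)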
- def fill_at(self, anchor: "Match | Location | Target | str",
- text: str, dx: int, dy: int, del_flag: bool = True,
- esc_flag: bool = True, mark_clicks: int = 1) -> "Region":
+ def fill_at(
+ self,
+ anchor: "Match | Location | Target | str",
+ text: str,
+ dx: int,
+ dy: int,
+ del_flag: bool = True,
+ esc_flag: bool = True,
+ mark_clicks: int = 1,
+ ) -> "Region":
"""
- Fills a new text at a text box using a displacement from an anchor.
+ Fill in new text at a text box using a displacement from an anchor.
:param anchor: target of reference for the input field
:param text: text to fill in
@@ -1161,13 +1319,23 @@ def fill_at(self, anchor: "Match | Location | Target | str",
return self
- def select_at(self, anchor: "Match | Location | Target | str",
- image_or_index: str | int, dx: int, dy: int, dw: int = 0,
- dh: int = 0, ret_flag: bool = True, mark_clicks: int = 1,
- tries: int = 3) -> "Region":
+ def select_at(
+ self,
+ anchor: "Match | Location | Target | str",
+ image_or_index: str | int,
+ dx: int,
+ dy: int,
+ dw: int = 0,
+ dh: int = 0,
+ ret_flag: bool = True,
+ mark_clicks: int = 1,
+ tries: int = 3,
+ ) -> "Region":
"""
- Select an option at a dropdown list using either an integer index
- or an option image if the order cannot be easily inferred.
+ Select an option at a dropdown list using an index or an image.
+
+ The caller can use either an integer index or an option image if the
+ order cannot be easily inferred.
:param anchor: target of reference for the input dropdown menu
:param image_or_index: item image or item index
@@ -1221,10 +1389,14 @@ def select_at(self, anchor: "Match | Location | Target | str",
# list, therefore a total of 2 option heights spanning the haystack height.
# The haystack y displacement relative to 'loc' is then 1/2*1/2*dh
loc = self.get_mouse_location()
- dropdown_haystack = Region(xpos=int(loc.x - dw / 2),
- ypos=int(loc.y - dh / 4),
- width=dw, height=dh,
- dc=self.dc_backend, cv=self.cv_backend)
+ dropdown_haystack = Region(
+ xpos=int(loc.x - dw / 2),
+ ypos=int(loc.y - dh / 4),
+ width=dw,
+ height=dh,
+ dc=self.dc_backend,
+ cv=self.cv_backend,
+ )
try:
dropdown_haystack.click(image_or_index)
except FindError:
@@ -1232,7 +1404,15 @@ def select_at(self, anchor: "Match | Location | Target | str",
if tries == 1:
raise
logging.info("Opening the dropdown menu didn't work, retrying")
- self.select_at(anchor, image_or_index, dx, dy, dw, dh,
- mark_clicks=mark_clicks, tries=tries-1)
+ self.select_at(
+ anchor,
+ image_or_index,
+ dx,
+ dy,
+ dw,
+ dh,
+ mark_clicks=mark_clicks,
+ tries=tries - 1,
+ )
return self
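To make the dropdown haystack geometry from the comment in this hunk concrete, a small numeric sketch (the mouse location and the dw/dh values are arbitrary):

# with the cursor at (500, 300) and a search area of dw=100 by dh=40, the
# haystack is centered horizontally on the cursor and shifted up by dh/4,
# so it spans roughly one option height above and one below the clicked anchor
loc_x, loc_y = 500, 300
dw, dh = 100, 40
xpos = int(loc_x - dw / 2)  # 450
ypos = int(loc_y - dh / 4)  # 290
print(xpos, ypos, dw, dh)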
diff --git a/guibot/target.py b/guibot/target.py
index a2381911..93c9227f 100644
--- a/guibot/target.py
+++ b/guibot/target.py
@@ -14,10 +14,10 @@
# along with guibot. If not, see .
"""
+Classes and functionality related to sought targets on screen.
SUMMARY
------------------------------------------------------
-Classes and functionality related to sought targets on screen.
INTERFACE
@@ -38,14 +38,11 @@
from .errors import *
-__all__ = ['Target', 'Image', 'Text', 'Pattern', 'Chain']
+__all__ = ["Target", "Image", "Text", "Pattern", "Chain"]
class Target(object):
- """
- Target used to obtain screen location for clicking, typing,
- validation of expected visual output, etc.
- """
+ """Target used to obtain screen location for clicking, typing, validation of expected visual output, etc."""
@staticmethod
def from_data_file(filename: str) -> "Target":
@@ -70,7 +67,9 @@ def from_data_file(filename: str) -> "Target":
elif extension == ".steps":
target = Chain(name)
else:
- raise IncompatibleTargetFileError("The target file %s is not among any of the known types" % filename)
+ raise IncompatibleTargetFileError(
+ "The target file %s is not among any of the known types" % filename
+ )
return target
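A quick sketch of the extension-based dispatch implemented by `from_data_file`; only the `.steps` branch is visible in this hunk, so the other mappings here are assumptions based on the rest of the module, and the file names are hypothetical and must be resolvable on the search path:

from guibot.target import Target

button = Target.from_data_file("button.png")   # image file -> Image (assumed mapping)
label = Target.from_data_file("label.txt")     # text file -> Text (assumed mapping)
chain = Target.from_data_file("login.steps")   # steps file -> Chain (per this hunk)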
@@ -88,7 +87,13 @@ def from_match_file(filename: str) -> "Target":
match_filename = os.path.splitext(filename)[0] + ".match"
finder = Finder.from_match_file(match_filename)
- if finder.params["find"]["backend"] in ("autopy", "contour", "template", "feature", "tempfeat"):
+ if finder.params["find"]["backend"] in (
+ "autopy",
+ "contour",
+ "template",
+ "feature",
+ "tempfeat",
+ ):
target = Image(filename, match_settings=finder)
elif finder.params["find"]["backend"] == "text":
target = Text(name, match_settings=finder)
@@ -144,6 +149,7 @@ def get_similarity(self) -> float:
:returns: similarity required for the image to be matched
"""
return self.match_settings.params["find"]["similarity"].value
+
similarity = property(fget=get_similarity)
def get_center_offset(self) -> Location:
@@ -156,6 +162,7 @@ def get_center_offset(self) -> Location:
it is then taken when matching to produce a clicking target for a match.
"""
return self._center_offset
+
center_offset = property(fget=get_center_offset)
def load(self, filename: str, **kwargs: dict[str, type]) -> None:
@@ -202,8 +209,7 @@ def copy(self) -> "Target":
def with_center_offset(self, xpos: int, ypos: int) -> "Target":
"""
- Perform a copy of the target data with new match settings
- and with a newly defined center offset.
+ Perform a copy of the target data with new match settings and with a newly defined center offset.
:param xpos: new offset in the x direction
:param ypos: new offset in the y direction
@@ -215,8 +221,7 @@ def with_center_offset(self, xpos: int, ypos: int) -> "Target":
def with_similarity(self, new_similarity: float) -> "Target":
"""
- Perform a copy of the target data with new match settings
- and with a newly defined required similarity.
+ Perform a copy of the target data with new match settings and with a newly defined required similarity.
:param new_similarity: new required similarity
:returns: copy of the current target with new similarity
@@ -227,15 +232,17 @@ def with_similarity(self, new_similarity: float) -> "Target":
class Image(Target):
- """
- Container for image data supporting caching, clicking target,
- file operations, and preprocessing.
- """
+ """Container for image data supporting caching, clicking target, file operations, and preprocessing."""
_cache = {}
- def __init__(self, image_filename: str = "", pil_image: PIL.Image.Image = None,
- match_settings: "Finder" = None, use_cache: bool = True) -> None:
+ def __init__(
+ self,
+ image_filename: str = "",
+ pil_image: PIL.Image.Image = None,
+ match_settings: "Finder" = None,
+ use_cache: bool = True,
+ ) -> None:
"""
Build an image object.
@@ -266,7 +273,11 @@ def __init__(self, image_filename: str = "", pil_image: PIL.Image.Image = None,
def __str__(self) -> str:
"""Provide the image filename."""
- return "noname" if self._filename == "" else os.path.splitext(os.path.basename(self._filename))[0]
+ return (
+ "noname"
+ if self._filename == ""
+ else os.path.splitext(os.path.basename(self._filename))[0]
+ )
def get_filename(self) -> str:
"""
@@ -275,6 +286,7 @@ def get_filename(self) -> str:
:returns: filename of the image
"""
return self._filename
+
filename = property(fget=get_filename)
def get_width(self) -> int:
@@ -284,6 +296,7 @@ def get_width(self) -> int:
:returns: width of the image
"""
return self._width
+
width = property(fget=get_width)
def get_height(self) -> int:
@@ -293,6 +306,7 @@ def get_height(self) -> int:
:returns: height of the image
"""
return self._height
+
height = property(fget=get_height)
def get_pil_image(self) -> PIL.Image.Image:
@@ -302,9 +316,12 @@ def get_pil_image(self) -> PIL.Image.Image:
:returns: image data of the image
"""
return self._pil_image
+
pil_image = property(fget=get_pil_image)
- def load(self, filename: str, use_cache: bool = True, **kwargs: dict[str, type]) -> None:
+ def load(
+ self, filename: str, use_cache: bool = True, **kwargs: dict[str, type]
+ ) -> None:
"""
Load image from a file.
@@ -320,7 +337,7 @@ def load(self, filename: str, use_cache: bool = True, **kwargs: dict[str, type])
self._pil_image = self._cache[filename]
else:
# load and cache image
- self._pil_image = PIL.Image.open(filename).convert('RGB')
+ self._pil_image = PIL.Image.open(filename).convert("RGB")
if use_cache:
self._cache[filename] = self._pil_image
self._filename = filename
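As a usage note on the caching logic above, a small sketch of how the class-level `_cache` behaves (the file name is hypothetical and assumed to exist on the search path):

from guibot.target import Image

first = Image("button.png")                      # decoded with PIL and cached
second = Image("button.png")                     # reuses the cached PIL image
uncached = Image("button.png", use_cache=False)  # decoded again, not cached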
@@ -346,13 +363,14 @@ def save(self, filename: str) -> "Image":
class Text(Target):
- """
- Container for text data which is visually identified
- using OCR or general text detection methods.
- """
+ """Container for text data which is visually identified using OCR or general text detection methods."""
- def __init__(self, value: str = None, text_filename: str = None,
- match_settings: "Finder" = None) -> None:
+ def __init__(
+ self,
+ value: str = None,
+ text_filename: str = None,
+ match_settings: "Finder" = None,
+ ) -> None:
"""
Build a text object.
@@ -374,7 +392,7 @@ def __init__(self, value: str = None, text_filename: str = None,
def __str__(self) -> str:
"""Provide a part of the text value."""
- return self.value[:30].replace('/', '').replace('\\', '')
+ return self.value[:30].replace("/", "").replace("\\", "")
def load(self, filename: str, **kwargs: dict[str, type]) -> None:
"""
@@ -408,26 +426,24 @@ def distance_to(self, str2: str) -> float:
"""
str1 = str(self.value)
import numpy
+
M = numpy.empty((len(str1) + 1, len(str2) + 1), int)
- for a in range(0, len(str1)+1):
+ for a in range(0, len(str1) + 1):
M[a, 0] = a
- for b in range(0, len(str2)+1):
+ for b in range(0, len(str2) + 1):
M[0, b] = b
- for a in range(1, len(str1)+1): # (size_t a = 1; a <= NA; ++a):
- for b in range(1, len(str2)+1): # (size_t b = 1; b <= NB; ++b)
- z = M[a-1, b-1] + (0 if str1[a-1] == str2[b-1] else 1)
- M[a, b] = min(min(M[a-1, b] + 1, M[a, b-1] + 1), z)
+ for a in range(1, len(str1) + 1): # (size_t a = 1; a <= NA; ++a):
+ for b in range(1, len(str2) + 1): # (size_t b = 1; b <= NB; ++b)
+ z = M[a - 1, b - 1] + (0 if str1[a - 1] == str2[b - 1] else 1)
+ M[a, b] = min(min(M[a - 1, b] + 1, M[a, b - 1] + 1), z)
return M[len(str1), len(str2)]
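For reference, the nested loops above implement the standard Levenshtein (edit distance) recurrence; a dependency-free sketch of the same computation:

def levenshtein(str1: str, str2: str) -> int:
    # M[a][b] is the edit distance between the first a characters of str1
    # and the first b characters of str2
    M = [[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]
    for a in range(len(str1) + 1):
        M[a][0] = a
    for b in range(len(str2) + 1):
        M[0][b] = b
    for a in range(1, len(str1) + 1):
        for b in range(1, len(str2) + 1):
            substitution = M[a - 1][b - 1] + (0 if str1[a - 1] == str2[b - 1] else 1)
            M[a][b] = min(M[a - 1][b] + 1, M[a][b - 1] + 1, substitution)
    return M[len(str1)][len(str2)]

assert levenshtein("kitten", "sitting") == 3  # classic textbook example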
class Pattern(Target):
- """
- Container for abstracted data which is obtained from
- training of a classifier in order to recognize a target.
- """
+ """Container for abstracted data which is obtained from training of a classifier in order to recognize a target."""
def __init__(self, id: str, match_settings: "Finder" = None) -> None:
"""
@@ -522,6 +538,7 @@ def load(self, steps_filename: str, **kwargs: dict[str, type]) -> None:
:raises: :py:class:`errors.UnsupportedBackendError` if a chain step is of unknown type
:raises: :py:class:`IOError` if an chain step line cannot be parsed
"""
+
def resolve_stepsfile(filename: str) -> str:
"""
Try to find a valid steps file from a given file name.
@@ -546,7 +563,7 @@ def resolve_stepsfile(filename: str) -> str:
while lines:
step = lines.pop(0)
- dataconfig = re.split(r'\t+', step.rstrip('\t\n'))
+ dataconfig = re.split(r"\t+", step.rstrip("\t\n"))
# read a nested steps file and append to this chain
if dataconfig[0].endswith(".steps"):
@@ -572,12 +589,18 @@ def resolve_stepsfile(filename: str) -> str:
data_and_config = Pattern(data, match_settings=self.match_settings)
elif step_backend == "text":
if data.endswith(".txt"):
- data_and_config = Text(text_filename=data, match_settings=self.match_settings)
+ data_and_config = Text(
+ text_filename=data, match_settings=self.match_settings
+ )
else:
- data_and_config = Text(value=data, match_settings=self.match_settings)
+ data_and_config = Text(
+ value=data, match_settings=self.match_settings
+ )
else:
# in particular, we cannot have a chain within the chain since it is not useful
- raise UnsupportedBackendError("No target step type for '%s' backend" % step_backend)
+ raise UnsupportedBackendError(
+ "No target step type for '%s' backend" % step_backend
+ )
self._steps.append(data_and_config)
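For context, a sketch of the tab-separated step lines that this parser accepts; the file names and the pairing with `.match` configuration files are assumptions modeled on the `save()` output shown further below:

# hypothetical chain description written programmatically: one
# "data<TAB>match-config" pair per line; a nested .steps entry is expanded
# recursively by load()
steps_content = (
    "login_button.png\tlogin_button.match\n"
    "welcome_text.txt\twelcome_text.match\n"
    "nested_form.steps\tnested_form.match\n"
)
with open("login.steps", "w") as handle:
    handle.write(steps_content)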
@@ -618,7 +641,9 @@ def save(self, steps_filename: str) -> None:
data = data_and_config.filename
else:
# in particular, we cannot have a chain within the chain since it is not useful
- raise UnsupportedBackendError("No target step type for '%s' backend" % step_backend)
+ raise UnsupportedBackendError(
+ "No target step type for '%s' backend" % step_backend
+ )
data_and_config.save(data)
save_lines.append(data + "\t" + os.path.splitext(data)[0] + ".match\n")