diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d08e15e7..a5626c0b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -3,7 +3,7 @@ name: Lint Check on: [push, pull_request] jobs: - mypy: + lint: runs-on: ubuntu-latest steps: @@ -17,8 +17,9 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install mypy + pip install mypy black - - name: Run mypy + - name: Run linters run: | mypy guibot + black --check --diff --color guibot diff --git a/guibot/calibrator.py b/guibot/calibrator.py index 60181153..c0c0919a 100644 --- a/guibot/calibrator.py +++ b/guibot/calibrator.py @@ -37,13 +37,16 @@ from .location import Location import logging -log = logging.getLogger('guibot.calibrator') + +log = logging.getLogger("guibot.calibrator") #: explicit blacklist of backend combinations to skip for benchmarking -benchmark_blacklist = [("mixed", "normal", "mixed", "east", "hmm", "adaptive", "adaptive"), - ("mixed", "adaptive", "mixed", "east", "hmm", "adaptive", "adaptive"), - ("mixed", "canny", "mixed", "east", "hmm", "adaptive", "adaptive")] +benchmark_blacklist = [ + ("mixed", "normal", "mixed", "east", "hmm", "adaptive", "adaptive"), + ("mixed", "adaptive", "mixed", "east", "hmm", "adaptive", "adaptive"), + ("mixed", "canny", "mixed", "east", "hmm", "adaptive", "adaptive"), +] class Calibrator(object): @@ -58,8 +61,9 @@ class Calibrator(object): multiple random starts from a uniform or normal probability distribution. """ - def __init__(self, needle: Target = None, haystack: Image = None, - config: str = None) -> None: + def __init__( + self, needle: Target = None, haystack: Image = None, config: str = None + ) -> None: """ Build a calibrator object for a given match case. @@ -79,18 +83,30 @@ def __init__(self, needle: Target = None, haystack: Image = None, haystack = Target.from_data_file(haystack) maximize = maximize == "max" self.cases.append((needle, haystack, maximize)) - log.info("Registering match case with needle %s and haystack %s for %s", - needle, haystack, "maximizing" if maximize else "minimizing") + log.info( + "Registering match case with needle %s and haystack %s for %s", + needle, + haystack, + "maximizing" if maximize else "minimizing", + ) else: - raise ValueError("Need at least a single needle/haystack for calibration" - " or a config file for more than one match case") + raise ValueError( + "Need at least a single needle/haystack for calibration" + " or a config file for more than one match case" + ) # this attribute can be changed to use different run function self.run = self.run_default - def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = False, - calibration: bool = False, max_attempts: int = 3, - **kwargs: dict[str, type]) -> list[tuple[str, float, float]]: + def benchmark( + self, + finder: Finder, + random_starts: int = 0, + uniform: bool = False, + calibration: bool = False, + max_attempts: int = 3, + **kwargs: dict[str, type] + ) -> list[tuple[str, float, float]]: """ Perform benchmarking on all available algorithms of a finder for a given needle and haystack. @@ -109,8 +125,10 @@ def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = Fals for a given `needle` and `haystack`. 
""" results = [] - log.info("Performing benchmarking %s calibration", - "with" if calibration else "without") + log.info( + "Performing benchmarking %s calibration", + "with" if calibration else "without", + ) # block logging since we need all its info after the matching finishes ImageLogger.accumulate_logging = True @@ -121,7 +139,9 @@ def benchmark(self, finder: Finder, random_starts: int = 0, uniform: bool = Fals ordered_categories.remove("find") # test all matching methods of the current finder - def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[str, ...], None, None]: + def backend_tuples( + category_list: list[str], finder: Finder + ) -> Generator[tuple[str, ...], None, None]: if len(category_list) == 0: yield () else: @@ -130,6 +150,7 @@ def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[ for backend in backends: for z in backend_tuples(category_list[1:], finder): yield (backend,) + z + for backend_tuple in backend_tuples(ordered_categories, finder): if backend_tuple in benchmark_blacklist: log.warning("Skipping blacklisted benchmarked backend combination") @@ -138,30 +159,51 @@ def backend_tuples(category_list: list[str], finder: Finder) -> Generator[tuple[ log.info("Benchmark testing with %s", method) for backend, category in zip(backend_tuple, ordered_categories): - finder.configure_backend(backend=backend, category=category, reset=False) + finder.configure_backend( + backend=backend, category=category, reset=False + ) finder.can_calibrate(category, calibration) try: - finder.synchronize_backend(backend=backend, category=category, reset=False) + finder.synchronize_backend( + backend=backend, category=category, reset=False + ) except UnsupportedBackendError as error: - log.debug("Skipping synchronization for %s/backend=%s", category, backend) + log.debug( + "Skipping synchronization for %s/backend=%s", category, backend + ) if random_starts > 0: - self.search(finder, random_starts=random_starts, uniform=uniform, - calibration=calibration, max_attempts=max_attempts, **kwargs) + self.search( + finder, + random_starts=random_starts, + uniform=uniform, + calibration=calibration, + max_attempts=max_attempts, + **kwargs + ) elif calibration: self.calibrate(finder, max_attempts=max_attempts, **kwargs) start_time = time.time() similarity = 1.0 - self.run(finder, **kwargs) total_time = time.time() - start_time - log.debug("Obtained similarity %s from %s in %ss", similarity, method, total_time) + log.debug( + "Obtained similarity %s from %s in %ss", similarity, method, total_time + ) results.append((method, similarity, total_time)) ImageLogger.accumulate_logging = False return sorted(results, key=lambda x: x[1], reverse=True) - def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False, - calibration: bool = True, max_attempts: int = 3, **kwargs: dict[str, type]) -> float: + def search( + self, + finder: Finder, + random_starts: int = 1, + uniform: bool = False, + calibration: bool = True, + max_attempts: int = 3, + **kwargs: dict[str, type] + ) -> float: """ Search for the best match configuration for a given needle and haystack using calibration from random initial conditions. 
@@ -185,7 +227,9 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False, best_error = self.run(finder, **kwargs) best_params = init_params = finder.params for i in range(random_starts): - log.info("Random run %s\\%s, best error %s", i+1, random_starts, best_error) + log.info( + "Random run %s\\%s, best error %s", i + 1, random_starts, best_error + ) params = copy.deepcopy(init_params) for category in params.keys(): @@ -197,20 +241,33 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False, mean = None if uniform else param.value deviation = None if uniform else param.delta param.value = param.random_value(mean, deviation) - log.debug("Setting %s/%s to random value=%s", category, key, param.value) + log.debug( + "Setting %s/%s to random value=%s", + category, + key, + param.value, + ) finder.params = params if calibration: - error = 1.0 - self.calibrate(finder, max_attempts=max_attempts, **kwargs) + error = 1.0 - self.calibrate( + finder, max_attempts=max_attempts, **kwargs + ) else: error = self.run(finder, **kwargs) if error < best_error: - log.info("Random start ended with smaller error %s < %s", error, best_error) + log.info( + "Random start ended with smaller error %s < %s", error, best_error + ) best_error = error best_params = params else: - log.debug("Random start did not end with smaller error %s >= %s", error, best_error) + log.debug( + "Random start did not end with smaller error %s >= %s", + error, + best_error, + ) ImageLogger.accumulate_logging = False log.info("Best error for all random starts is %s", best_error) @@ -220,11 +277,19 @@ def search(self, finder: Finder, random_starts: int = 1, uniform: bool = False, for key in finder.params[category].keys(): param = finder.params[category][key] if hasattr(param, "value"): - log.log(9, "\t%s/%s with value %s +/- delta of %s", - category, key, param.value, param.delta) + log.log( + 9, + "\t%s/%s with value %s +/- delta of %s", + category, + key, + param.value, + param.delta, + ) return 1.0 - best_error - def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, type]) -> float: + def calibrate( + self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, type] + ) -> float: """ Calibrate the available match configuration for a given needle and haystack minimizing the matching error.
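For orientation, here is a minimal usage sketch of the calibration API reformatted above. The `Image` import path, the `TemplateFinder` choice, and the file names are assumptions for illustration and are not part of this diff:

```python
# Hypothetical usage sketch of Calibrator (imports and file names are illustrative).
from guibot.calibrator import Calibrator
from guibot.finder import TemplateFinder
from guibot.target import Image  # assumed location of the Image target class

# a single needle/haystack match case; a config file with several cases also works
calibrator = Calibrator(needle=Image("needle.png"), haystack=Image("haystack.png"))
finder = TemplateFinder()

# rank all backend combinations of the finder by the similarity they achieve
for method, similarity, total_time in calibrator.benchmark(finder):
    print("%s: similarity=%.3f in %.2fs" % (method, similarity, total_time))

# refine one configuration from a few random starting points, reporting the best similarity
best = calibrator.search(finder, random_starts=3, calibration=True)
```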
@@ -256,7 +321,7 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t log.log(9, "Calibration start with error=%s", best_error) for n in range(max_attempts): - log.info("Try %s\\%s, best error %s", n+1, max_attempts, best_error) + log.info("Try %s\\%s, best error %s", n + 1, max_attempts, best_error) if best_error == 0.0: log.info("Exiting due to zero error") @@ -269,17 +334,30 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t if key == "backend": continue elif not isinstance(param, CVParameter): - log.warning("The parameter %s/%s is not a CV parameter!", category, key) + log.warning( + "The parameter %s/%s is not a CV parameter!", category, key + ) continue elif param.fixed: log.log(9, "Skip fixed parameter: %s/%s", category, key) continue elif isinstance(param.value, str): - log.log(9, "Skip string parameter: %s/%s (calibration not supported)", category, key) + log.log( + 9, + "Skip string parameter: %s/%s (calibration not supported)", + category, + key, + ) continue elif param.delta < param.tolerance: - log.log(9, "The parameter %s/%s has slowed down to %s below tolerance %s", - category, key, param.delta, param.tolerance) + log.log( + 9, + "The parameter %s/%s has slowed down to %s below tolerance %s", + category, + key, + param.delta, + param.tolerance, + ) continue else: slowdown_flag = False @@ -288,15 +366,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t # add the delta to the current parameter if isinstance(param.value, float): if param.range[1] is not None: - param.value = min(float(start_value) + param.delta, - param.range[1]) + param.value = min( + float(start_value) + param.delta, param.range[1] + ) else: param.value = float(start_value) + param.delta elif isinstance(param.value, int) and not param.enumerated: intdelta = int(math.ceil(param.delta)) if param.range[1] is not None: - param.value = min(int(start_value) + intdelta, - param.range[1]) + param.value = min( + int(start_value) + intdelta, param.range[1] + ) else: param.value = int(start_value) + intdelta # remaining types require special handling @@ -307,8 +387,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t continue param.value = mode error = self.run(finder, **kwargs) - log.log(9, "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", category, key, - start_value, param.value, param.delta, error, best_error) + log.log( + 9, + "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", + category, + key, + start_value, + param.value, + param.delta, + error, + best_error, + ) if error < best_error: best_error = error param.value = mode @@ -322,12 +411,25 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t else: param.value = True else: - raise ValueError("Parameter %s/%s is of unsupported type %s", - category, key, type(param.value)) + raise ValueError( + "Parameter %s/%s is of unsupported type %s", + category, + key, + type(param.value), + ) error = self.run(finder, **kwargs) - log.log(9, "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", category, key, - start_value, param.value, param.delta, error, best_error) + log.log( + 9, + "%s/%s: %s +> %s (delta: %s) = %s (best: %s)", + category, + key, + start_value, + param.value, + param.delta, + error, + best_error, + ) if error < best_error: best_error = error param.delta *= 1.1 @@ -336,15 +438,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t if isinstance(param.value, float): if 
param.range[0] is not None: - param.value = max(float(start_value) - param.delta, - param.range[0]) + param.value = max( + float(start_value) - param.delta, param.range[0] + ) else: param.value = float(start_value) - param.delta elif isinstance(param.value, int): intdelta = int(math.floor(param.delta)) if param.range[0] is not None: - param.value = max(int(start_value) - intdelta, - param.range[0]) + param.value = max( + int(start_value) - intdelta, param.range[0] + ) else: param.value = int(start_value) - intdelta elif isinstance(param.value, bool): @@ -353,8 +457,17 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t continue error = self.run(finder, **kwargs) - log.log(9, "%s/%s: %s -> %s (delta: %s) = %s (best: %s)", category, key, - start_value, param.value, param.delta, error, best_error) + log.log( + 9, + "%s/%s: %s -> %s (delta: %s) = %s (best: %s)", + category, + key, + start_value, + param.value, + param.delta, + error, + best_error, + ) if error < best_error: best_error = error param.delta *= 1.1 @@ -381,8 +494,14 @@ def calibrate(self, finder: Finder, max_attempts: int = 3, **kwargs: dict[str, t delattr(param, "max_delta") elif param.fixed: param.delta = 0.0 - log.log(9, "\t%s/%s with value %s +/- delta of %s", - category, key, param.value, param.delta) + log.log( + 9, + "\t%s/%s with value %s +/- delta of %s", + category, + key, + param.value, + param.delta, + ) return 1.0 - best_error def run_default(self, finder: Finder, **_kwargs: dict[str, type]) -> float: @@ -502,7 +621,10 @@ def _handle_restricted_values(self, finder: Finder) -> None: params["blockSize"].value += 1 if "tdetect" in finder.params: params = finder.params["tdetect"] - if params["backend"] == "east" and params["input_res_x"].value != params["input_res_y"].value: + if ( + params["backend"] == "east" + and params["input_res_x"].value != params["input_res_y"].value + ): params["input_res_x"].value = params["input_res_y"].value if "ocr" in finder.params: params = finder.params["ocr"] diff --git a/guibot/config.py b/guibot/config.py index fb85002a..4dd5a4ae 100644 --- a/guibot/config.py +++ b/guibot/config.py @@ -30,7 +30,7 @@ from .errors import * -log = logging.getLogger('guibot.config') +log = logging.getLogger("guibot.config") class GlobalConfig(type): @@ -91,6 +91,7 @@ def toggle_delay(self, value: float = None) -> float | None: else: GlobalConfig._toggle_delay = value return None + #: time interval between mouse down and up in a click toggle_delay = property(fget=toggle_delay, fset=toggle_delay) @@ -105,10 +106,11 @@ def click_delay(self, value: float = None) -> float | None: else: GlobalConfig._click_delay = value return None + #: time interval after a click (in a double or n-click) click_delay = property(fget=click_delay, fset=click_delay) - def delay_after_drag(self, value: float = None) -> float | None: + def delay_after_drag(self, value: float = None) -> float | None: """ Same as :py:func:`GlobalConfig.toggle_delay` but with @@ -119,6 +121,7 @@ def delay_after_drag(self, value: float = None) -> float | None: else: GlobalConfig._drag_delay = value return None + #: timeout before drag operation delay_after_drag = property(fget=delay_after_drag, fset=delay_after_drag) @@ -133,6 +136,7 @@ def delay_before_drop(self, value: float = None) -> float | None: else: GlobalConfig._drop_delay = value return None + #: timeout before drop operation delay_before_drop = property(fget=delay_before_drop, fset=delay_before_drop) @@ -147,6 +151,7 @@ def delay_before_keys(self, value: float = 
None) -> float | None: else: GlobalConfig._keys_delay = value return None + #: timeout before key press operation delay_before_keys = property(fget=delay_before_keys, fset=delay_before_keys) @@ -161,6 +166,7 @@ def delay_between_keys(self, value: float = None) -> float | None: else: GlobalConfig._type_delay = value return None + #: time interval between two consecutively typed keys delay_between_keys = property(fget=delay_between_keys, fset=delay_between_keys) @@ -176,8 +182,11 @@ def rescan_speed_on_find(self, value: float = None) -> float | None: else: GlobalConfig._rescan_speed_on_find = value return None + #: time interval between two image matching attempts (used to reduce overhead on the CPU) - rescan_speed_on_find = property(fget=rescan_speed_on_find, fset=rescan_speed_on_find) + rescan_speed_on_find = property( + fget=rescan_speed_on_find, fset=rescan_speed_on_find + ) def wait_for_animations(self, value: bool = None) -> bool | None: """ @@ -198,6 +207,7 @@ def wait_for_animations(self, value: bool = None) -> bool | None: return None else: raise ValueError + #: whether to wait for animations to complete and match only static (not moving) targets wait_for_animations = property(fget=wait_for_animations, fset=wait_for_animations) @@ -219,6 +229,7 @@ def smooth_mouse_drag(self, value: bool = None) -> bool | None: return None else: raise ValueError + #: whether to move the mouse cursor to a location instantly or smoothly smooth_mouse_drag = property(fget=smooth_mouse_drag, fset=smooth_mouse_drag) @@ -239,8 +250,11 @@ def preprocess_special_chars(self, value: bool = None) -> bool | None: return None else: raise ValueError + #: whether to preprocess capital and special characters and handle them internally - preprocess_special_chars = property(fget=preprocess_special_chars, fset=preprocess_special_chars) + preprocess_special_chars = property( + fget=preprocess_special_chars, fset=preprocess_special_chars + ) def save_needle_on_error(self, value: bool = None) -> bool | None: """ @@ -255,8 +269,11 @@ def save_needle_on_error(self, value: bool = None) -> bool | None: return None else: raise ValueError + #: whether to perform an extra needle dump on matching error - save_needle_on_error = property(fget=save_needle_on_error, fset=save_needle_on_error) + save_needle_on_error = property( + fget=save_needle_on_error, fset=save_needle_on_error + ) def image_logging_level(self, value: int = None) -> int | None: """ @@ -272,10 +289,11 @@ def image_logging_level(self, value: int = None) -> int | None: else: GlobalConfig._image_logging_level = value return None + #: logging level similar to the python logging module image_logging_level = property(fget=image_logging_level, fset=image_logging_level) - def image_logging_step_width(self, value: int = None) -> int | None: + def image_logging_step_width(self, value: int = None) -> int | None: """ Same as :py:func:`GlobalConfig.image_logging_level` but with @@ -287,8 +305,11 @@ def image_logging_step_width(self, value: int = None) -> int | None: else: GlobalConfig._image_logging_step_width = value return None + #: number of digits when enumerating the image logging steps, e.g. value=3 for 001, 002, etc. 
- image_logging_step_width = property(fget=image_logging_step_width, fset=image_logging_step_width) + image_logging_step_width = property( + fget=image_logging_step_width, fset=image_logging_step_width + ) def image_quality(self, value: int = None) -> int | None: """ @@ -303,6 +324,7 @@ def image_quality(self, value: int = None) -> int | None: else: GlobalConfig._image_quality = value return None + #: quality of the image dumps ranging from 0 for no compression to 9 for maximum compression # (used to save space and reduce the disk space needed for image logging) image_quality = property(fget=image_quality, fset=image_quality) @@ -319,8 +341,11 @@ def image_logging_destination(self, value: str = None) -> str | None: else: GlobalConfig._image_logging_destination = value return None + #: relative path of the image logging steps - image_logging_destination = property(fget=image_logging_destination, fset=image_logging_destination) + image_logging_destination = property( + fget=image_logging_destination, fset=image_logging_destination + ) def display_control_backend(self, value: str = None) -> str | None: """ @@ -353,8 +378,11 @@ def display_control_backend(self, value: str = None) -> str | None: raise ValueError("Unsupported backend for GUI actions '%s'" % value) GlobalConfig._display_control_backend = value return None + #: name of the display control backend - display_control_backend = property(fget=display_control_backend, fset=display_control_backend) + display_control_backend = property( + fget=display_control_backend, fset=display_control_backend + ) # these methods do not check for valid values since this # is already done during region and target initialization @@ -389,6 +417,7 @@ def find_backend(self, value: str = None) -> str | None: else: GlobalConfig._find_backend = value return None + #: name of the computer vision backend find_backend = property(fget=find_backend, fset=find_backend) @@ -405,8 +434,11 @@ def contour_threshold_backend(self, value: str = None) -> str | None: else: GlobalConfig._contour_threshold_backend = value return None + #: name of the contour threshold backend - contour_threshold_backend = property(fget=contour_threshold_backend, fset=contour_threshold_backend) + contour_threshold_backend = property( + fget=contour_threshold_backend, fset=contour_threshold_backend + ) def template_match_backend(self, value: str = None) -> str | None: """ @@ -422,8 +454,11 @@ def template_match_backend(self, value: str = None) -> str | None: else: GlobalConfig._template_match_backend = value return None + #: name of the template matching backend - template_match_backend = property(fget=template_match_backend, fset=template_match_backend) + template_match_backend = property( + fget=template_match_backend, fset=template_match_backend + ) def feature_detect_backend(self, value: str = None) -> str | None: """ @@ -439,8 +474,11 @@ def feature_detect_backend(self, value: str = None) -> str | None: else: GlobalConfig._feature_detect_backend = value return None + #: name of the feature detection backend - feature_detect_backend = property(fget=feature_detect_backend, fset=feature_detect_backend) + feature_detect_backend = property( + fget=feature_detect_backend, fset=feature_detect_backend + ) def feature_extract_backend(self, value: str = None) -> str | None: """ @@ -455,8 +493,11 @@ def feature_extract_backend(self, value: str = None) -> str | None: else: GlobalConfig._feature_extract_backend = value return None + #: name of the feature extraction backend - feature_extract_backend = 
property(fget=feature_extract_backend, fset=feature_extract_backend) + feature_extract_backend = property( + fget=feature_extract_backend, fset=feature_extract_backend + ) def feature_match_backend(self, value: str = None) -> str | None: """ @@ -471,8 +512,11 @@ def feature_match_backend(self, value: str = None) -> str | None: else: GlobalConfig._feature_match_backend = value return None + #: name of the feature matching backend - feature_match_backend = property(fget=feature_match_backend, fset=feature_match_backend) + feature_match_backend = property( + fget=feature_match_backend, fset=feature_match_backend + ) def text_detect_backend(self, value: str = None) -> str | None: """ @@ -487,6 +531,7 @@ def text_detect_backend(self, value: str = None) -> str | None: else: GlobalConfig._text_detect_backend = value return None + #: name of the text detection backend text_detect_backend = property(fget=text_detect_backend, fset=text_detect_backend) @@ -503,6 +548,7 @@ def text_ocr_backend(self, value: str = None) -> str | None: else: GlobalConfig._text_ocr_backend = value return None + #: name of the optical character recognition backend text_ocr_backend = property(fget=text_ocr_backend, fset=text_ocr_backend) @@ -519,6 +565,7 @@ def deep_learn_backend(self, value: str = None) -> str | None: else: GlobalConfig._deep_learn_backend = value return None + #: name of the deep learning backend deep_learn_backend = property(fget=deep_learn_backend, fset=deep_learn_backend) @@ -535,8 +582,11 @@ def hybrid_match_backend(self, value: str = None) -> str | None: else: GlobalConfig._hybrid_match_backend = value return None + #: name of the hybrid matching backend for unconfigured one-step targets - hybrid_match_backend = property(fget=hybrid_match_backend, fset=hybrid_match_backend) + hybrid_match_backend = property( + fget=hybrid_match_backend, fset=hybrid_match_backend + ) class GlobalConfig(object, metaclass=GlobalConfig): # type: ignore @@ -639,24 +689,30 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: self.__synchronize_backend() - def __configure_backend(self, backend: str = None, category: str ="type", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "type", reset: bool = False + ) -> None: if category != "type": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: # reset makes no sense here since this is the base configuration pass if backend is None: backend = "cv" if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) self.params[category] = {} self.params[category]["backend"] = backend - def configure_backend(self, backend: str = None, category: str = "type", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "type", reset: bool = False + ) -> None: """ Generate configuration dictionary for a given backend. 
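As a quick reminder of how these metaclass-backed properties are consumed, a small sketch with illustrative values only:

```python
# Settings live on the GlobalConfig metaclass, so they are read and written on the class itself.
from guibot.config import GlobalConfig

GlobalConfig.toggle_delay = 0.1          # seconds between mouse down and up in a click
GlobalConfig.smooth_mouse_drag = False   # teleport the cursor instead of smooth motion
GlobalConfig.find_backend = "template"   # default computer vision backend

print(GlobalConfig.toggle_delay, GlobalConfig.find_backend)
```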
@@ -681,20 +737,26 @@ def configure(self, reset: bool = True, **kwargs: dict[str, type]) -> None: """ self.configure_backend(reset=reset) - def __synchronize_backend(self, backend: str = None, category: str = "type", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "type", reset: bool = False + ) -> None: if category != "type": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: # reset makes no sense here since this is the base configuration pass # no backend object to sync to backend = "cv" if backend is None else backend if backend not in self.algorithms[self.categories[category]]: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) - def synchronize_backend(self, backend: str = None, category: str = "type", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "type", reset: bool = False + ) -> None: """ Synchronize a category backend with the equalizer configuration. @@ -707,7 +769,9 @@ def synchronize_backend(self, backend: str = None, category: str = "type", """ self.__synchronize_backend(backend, category, reset) - def synchronize(self, *args: tuple[type, ...], reset: bool = True, **kwargs: dict[str, type]) -> None: + def synchronize( + self, *args: tuple[type, ...], reset: bool = True, **kwargs: dict[str, type] + ) -> None: """ Synchronize all backends with the current configuration dictionary. diff --git a/guibot/controller.py b/guibot/controller.py index e5a08bcc..a3824b40 100644 --- a/guibot/controller.py +++ b/guibot/controller.py @@ -40,9 +40,14 @@ from .errors import * -log = logging.getLogger('guibot.controller') -__all__ = ['Controller', 'AutoPyController', 'XDoToolController', - 'VNCDoToolController', 'PyAutoGUIController'] +log = logging.getLogger("guibot.controller") +__all__ = [ + "Controller", + "AutoPyController", + "XDoToolController", + "VNCDoToolController", + "PyAutoGUIController", +] class Controller(LocalConfig): @@ -57,8 +62,12 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: # available and currently fully compatible methods self.categories["control"] = "control_methods" - self.algorithms["control_methods"] = ["autopy", "pyautogui", - "xdotool", "vncdotool"] + self.algorithms["control_methods"] = [ + "autopy", + "pyautogui", + "xdotool", + "vncdotool", + ] # other attributes self._backend_obj = None @@ -83,6 +92,7 @@ def get_width(self) -> int: :returns: width of the connected screen """ return self._width + width = property(fget=get_width) def get_height(self) -> int: @@ -92,6 +102,7 @@ def get_height(self) -> int: :returns: height of the connected screen """ return self._height + height = property(fget=get_height) def get_keymap(self) -> inputmap.Key: @@ -101,6 +112,7 @@ def get_keymap(self) -> inputmap.Key: :returns: map of keys to be used for the connected screen """ return self._keymap + keymap = property(fget=get_keymap) def get_mousemap(self) -> inputmap.MouseButton: @@ -110,6 +122,7 @@ def get_mousemap(self) -> inputmap.MouseButton: :returns: map of mouse buttons to be used for the connected screen """ return self._mousemap + mousemap = property(fget=get_mousemap) def get_modmap(self) -> inputmap.KeyModifier: @@ -119,6 +132,7 @@ def get_modmap(self) -> 
inputmap.KeyModifier: :returns: map of modifier keys to be used for the connected screen """ return self._modmap + modmap = property(fget=get_modmap) def get_mouse_location(self) -> Location: @@ -128,27 +142,34 @@ def get_mouse_location(self) -> Location: :returns: location of the mouse pointer """ return self._pointer + mouse_location = property(fget=get_mouse_location) - def __configure_backend(self, backend: str = None, category: str = "control", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "control", reset: bool = False + ) -> None: if category != "control": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(Controller, self).configure_backend("dc", reset=True) if backend is None: backend = GlobalConfig.display_control_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} self.params[category]["backend"] = backend log.log(9, "%s %s\n", category, self.params[category]) - def configure_backend(self, backend: str = None, category: str = "control", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "control", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -156,17 +177,23 @@ def configure_backend(self, backend: str = None, category: str = "control", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "control", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "control", reset: bool = False + ) -> None: if category != "control": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(Controller, self).synchronize_backend("dc", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) - def synchronize_backend(self, backend: str = None, category: str = "control", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "control", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
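A hedged sketch of how one of the concrete controllers from this module is typically driven — it assumes the pyautogui backend is installed and that the default constructor both configures and synchronizes, which is not shown in these hunks:

```python
# Illustrative only: exercising the display-control API with the PyAutoGUI backend.
from guibot.controller import PyAutoGUIController
from guibot.location import Location

dc = PyAutoGUIController()            # assumed to configure and synchronize by default
print(dc.width, dc.height)            # screen geometry exposed through properties

dc.mouse_move(Location(100, 200), smooth=False)  # teleport rather than glide
dc.mouse_click(count=2)               # defaults to the left mouse button
dc.keys_type("hello")                 # typed character by character

screenshot = dc.capture_screen()      # full screen when no region is passed
```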
@@ -207,7 +234,7 @@ def _region_from_args(self, *args: "Region") -> tuple[int, int, int, int, str]: height = self._height - ypos # TODO: Switch to in-memory conversion - patch backends or request get_raw() from authors - with NamedTemporaryFile(prefix='guibot', suffix='.png') as f: + with NamedTemporaryFile(prefix="guibot", suffix=".png") as f: # NOTE: the file can be open twice on unix but only once on windows so simply # use the generated filename to avoid this difference and remove it manually filename = f.name @@ -222,7 +249,9 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image: :returns: image of the current screen :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def mouse_move(self, location: Location, smooth: bool = True) -> None: """ @@ -232,9 +261,13 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None: :param smooth: whether to use smooth transition or just teleport the mouse :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) - def mouse_click(self, button: int = None, count: int = 1, modifiers: list[str] = None) -> None: + def mouse_click( + self, button: int = None, count: int = 1, modifiers: list[str] = None + ) -> None: """ Click the selected mouse button N times at the current mouse location. @@ -244,7 +277,9 @@ def mouse_click(self, button: int = None, count: int = 1, modifiers: list[str] = (see :py:class:`inputmap.KeyModifier` for extensive list) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def mouse_down(self, button: int) -> None: """ @@ -254,7 +289,9 @@ def mouse_down(self, button: int) -> None: (see :py:class:`inputmap.MouseButton` for extensive list) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def mouse_up(self, button: int) -> None: """ @@ -264,7 +301,9 @@ def mouse_up(self, button: int) -> None: (see :py:class:`inputmap.MouseButton` for extensive list) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None: """ @@ -275,7 +314,9 @@ def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None: (only available on some platforms) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: """ @@ -286,7
+327,9 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: :param up_down: hold down if true else release :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) def keys_press(self, keys: list[str] | str) -> None: """ @@ -308,7 +351,9 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None: (see :py:class:`inputmap.KeyModifier` for extensive list) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Method is not available for this controller implementation") + raise NotImplementedError( + "Method is not available for this controller implementation" + ) class AutoPyController(Controller): @@ -334,23 +379,32 @@ def get_mouse_location(self) -> Location: loc = self._backend_obj.mouse.location() # newer versions do their own scale conversion version = self._backend_obj.__version__.split(".") - if int(version[0]) > 3 or int(version[0]) == 3 and (int(version[1]) > 0 or int(version[2]) > 0): + if ( + int(version[0]) > 3 + or int(version[0]) == 3 + and (int(version[1]) > 0 or int(version[2]) > 0) + ): return Location(int(loc[0] * self._scale), int(loc[1] * self._scale)) return Location(int(loc[0] / self._scale), int(loc[1] / self._scale)) + mouse_location = property(fget=get_mouse_location) - def __configure_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: if category != "autopy": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(AutoPyController, self).configure_backend("autopy", reset=True) self.params[category] = {} self.params[category]["backend"] = "none" - def configure_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -358,16 +412,22 @@ def configure_backend(self, backend: str = None, category: str = "autopy", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: if category != "autopy": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(AutoPyController, self).synchronize_backend("autopy", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) import autopy + self._backend_obj = autopy self._scale = self._backend_obj.screen.scale() @@ -379,8 +439,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "autopy", self._modmap = inputmap.AutoPyKeyModifier() self._mousemap = inputmap.AutoPyMouseButton() - def synchronize_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -397,17 +458,28 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image: xpos, ypos, width, height, filename = self._region_from_args(*args) # autopy works in points and requires a minimum of one point along a dimension - xpos, ypos, width, height = xpos / self._scale, ypos / self._scale, width / self._scale, height / self._scale - xpos, ypos = float(xpos) - (1.0 - float(width)) if width < 1.0 else xpos, float(ypos) - (1.0 - float(height)) if height < 1.0 else ypos - height, width = 1.0 if float(height) < 1.0 else height, 1.0 if float(width) < 1.0 else width + xpos, ypos, width, height = ( + xpos / self._scale, + ypos / self._scale, + width / self._scale, + height / self._scale, + ) + xpos, ypos = float(xpos) - (1.0 - float(width)) if width < 1.0 else xpos, ( + float(ypos) - (1.0 - float(height)) if height < 1.0 else ypos + ) + height, width = 1.0 if float(height) < 1.0 else height, ( + 1.0 if float(width) < 1.0 else width + ) try: - autopy_bmp = self._backend_obj.bitmap.capture_screen(((xpos, ypos), (width, height))) + autopy_bmp = self._backend_obj.bitmap.capture_screen( + ((xpos, ypos), (width, height)) + ) except ValueError: - return Image("", PIL.Image.new('RGB', (1, 1))) + return Image("", PIL.Image.new("RGB", (1, 1))) autopy_bmp.save(filename) with PIL.Image.open(filename) as f: - pil_image = f.convert('RGB') + pil_image = f.convert("RGB") os.unlink(filename) return Image("", pil_image) @@ -424,8 +496,9 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None: self._backend_obj.mouse.move(x, y) self._pointer = location - def mouse_click(self, button: int = None, count: int = 1, - modifiers: list[str] = None) -> None: + def mouse_click( + self, button: int = None, count: int = 1, modifiers: list[str] = None + ) -> None: """ Custom implementation of the base method. 
@@ -514,12 +587,16 @@ def get_mouse_location(self) -> Location: x = re.search(r"x:(\d+)", pos).group(1) y = re.search(r"y:(\d+)", pos).group(1) return Location(int(x), int(y)) + mouse_location = property(fget=get_mouse_location) - def __configure_backend(self, backend: str = None, category: str = "xdotool", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "xdotool", reset: bool = False + ) -> None: if category != "xdotool": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(XDoToolController, self).configure_backend("xdotool", reset=True) @@ -527,8 +604,9 @@ def __configure_backend(self, backend: str = None, category: str = "xdotool", self.params[category]["backend"] = "none" self.params[category]["binary"] = "xdotool" - def configure_backend(self, backend: str = None, category: str = "xdotool", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "xdotool", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -536,24 +614,32 @@ def configure_backend(self, backend: str = None, category: str = "xdotool", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "xdotool", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "xdotool", reset: bool = False + ) -> None: if category != "xdotool": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(XDoToolController, self).synchronize_backend("xdotool", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) import subprocess + class XDoTool(object): def __init__(self, dc: Controller) -> None: self.dc = dc + def run(self, command: str, *args: list[str]) -> str: process = [self.dc.params[category]["binary"]] process += [command] process += args return subprocess.check_output(process, shell=False).decode() + self._backend_obj = XDoTool(self) self._width, self._height = self._backend_obj.run("getdisplaygeometry").split() @@ -563,8 +649,9 @@ def run(self, command: str, *args: list[str]) -> str: self._modmap = inputmap.XDoToolKeyModifier() self._mousemap = inputmap.XDoToolMouseButton() - def synchronize_backend(self, backend: str = None, category: str = "xdotool", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "xdotool", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -580,10 +667,22 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image: """ xpos, ypos, width, height, filename = self._region_from_args(*args) import subprocess - with subprocess.Popen(("xwd", "-silent", "-root"), stdout=subprocess.PIPE) as xwd: - subprocess.call(("convert", "xwd:-", "-crop", "%sx%s+%s+%s" % (width, height, xpos, ypos), filename), stdin=xwd.stdout) + + with subprocess.Popen( + ("xwd", "-silent", "-root"), stdout=subprocess.PIPE + ) as xwd: + subprocess.call( + ( + "convert", + "xwd:-", + "-crop", + "%sx%s+%s+%s" % (width, height, xpos, ypos), + filename, + ), + stdin=xwd.stdout, + ) with PIL.Image.open(filename) as f: - pil_image = f.convert('RGB') + pil_image = f.convert("RGB") os.unlink(filename) return Image("", pil_image) @@ -595,16 +694,19 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None: """ if smooth: # TODO: implement smooth mouse move? - log.warning("Smooth mouse move is not supported for the XDO controller," - " defaulting to instant mouse move") + log.warning( + "Smooth mouse move is not supported for the XDO controller," + " defaulting to instant mouse move" + ) self._backend_obj.run("mousemove", str(location.x), str(location.y)) # handle race conditions where the backend coordinates are updated too # slowly by giving some time for the new location to take effect there time.sleep(0.3) self._pointer = location - def mouse_click(self, button: int = None, count: int = 1, - modifiers: list[str] = None) -> None: + def mouse_click( + self, button: int = None, count: int = 1, modifiers: list[str] = None + ) -> None: """ Custom implementation of the base method. @@ -649,9 +751,9 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: """ for key in keys: if up_down: - self._backend_obj.run('keydown', str(key)) + self._backend_obj.run("keydown", str(key)) else: - self._backend_obj.run('keyup', str(key)) + self._backend_obj.run("keyup", str(key)) def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None: """ @@ -663,7 +765,7 @@ def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None: self.keys_toggle(modifiers, True) for part in text: - self._backend_obj.run('type', str(part)) + self._backend_obj.run("type", str(part)) if modifiers is not None: self.keys_toggle(modifiers, False) @@ -683,9 +785,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: self.__synchronize_backend(reset=False) - def __configure_backend(self, backend: str = None, category: str = "vncdotool", reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "vncdotool", reset: bool = False + ) -> None: if category != "vncdotool": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(VNCDoToolController, self).configure_backend("vncdotool", reset=True) @@ -698,8 +804,9 @@ def __configure_backend(self, backend: str = None, category: str = "vncdotool", # password for the vnc server self.params[category]["vnc_password"] = None - def configure_backend(self, backend: str = None, category: str = "vncdotool", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "vncdotool", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -707,33 +814,46 @@ def configure_backend(self, backend: str = None, category: str = "vncdotool", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "vncdotool", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "vncdotool", reset: bool = False + ) -> None: if category != "vncdotool": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: - super(VNCDoToolController, self).synchronize_backend("vncdotool", reset=True) + super(VNCDoToolController, self).synchronize_backend( + "vncdotool", reset=True + ) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) from vncdotool import api + if self._backend_obj: # api.connect() gives us a threaded client, so we need to clean up resources # to avoid dangling connections and deadlocks if synchronizing more than once self._backend_obj.disconnect() - self._backend_obj = api.connect('%s:%i' % (self.params[category]["vnc_hostname"], - self.params[category]["vnc_port"]), - self.params[category]["vnc_password"]) + self._backend_obj = api.connect( + "%s:%i" + % ( + self.params[category]["vnc_hostname"], + self.params[category]["vnc_port"], + ), + self.params[category]["vnc_password"], + ) # for special characters preprocessing for the vncdotool self._backend_obj.factory.force_caps = True # additional logging for vncdotool available so let's make use of it - logging.getLogger('vncdotool.client').setLevel(10) - logging.getLogger('vncdotool').setLevel(logging.ERROR) - logging.getLogger('twisted').setLevel(logging.ERROR) + logging.getLogger("vncdotool.client").setLevel(10) + logging.getLogger("vncdotool").setLevel(logging.ERROR) + logging.getLogger("twisted").setLevel(logging.ERROR) # screen size - with NamedTemporaryFile(prefix='guibot', suffix='.png') as f: + with NamedTemporaryFile(prefix="guibot", suffix=".png") as f: filename = f.name screen = self._backend_obj.captureScreen(filename) os.unlink(filename) @@ -749,8 +869,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "vncdotool" self._modmap = inputmap.VNCDoToolKeyModifier() self._mousemap = inputmap.VNCDoToolMouseButton() - def synchronize_backend(self, backend: str = None, category: str = "vncdotool", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "vncdotool", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -766,8 +887,10 @@ def capture_screen(self, *args: "list[int] | Region | None") -> Image: """ xpos, ypos, width, height, _ = self._region_from_args(*args) self._backend_obj.refreshScreen() - cropped = self._backend_obj.screen.crop((xpos, ypos, xpos + width, ypos + height)) - pil_image = cropped.convert('RGB') + cropped = self._backend_obj.screen.crop( + (xpos, ypos, xpos + width, ypos + height) + ) + pil_image = cropped.convert("RGB") return Image("", pil_image) def mouse_move(self, location: Location, smooth: bool = True) -> None: @@ -782,8 +905,9 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None: self._backend_obj.mouseMove(location.x, location.y) self._pointer = location - def mouse_click(self, button: int = None, count: int = 1, - modifiers: list[str] = None) -> None: + def mouse_click( + self, button: int = None, count: int = 1, modifiers: list[str] = None + ) -> None: """ Custom implementation of the base method. @@ -829,17 +953,17 @@ def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: """ for key in keys: if key == "\\": - key = 'bslash' + key = "bslash" elif key == "/": - key = 'fslash' + key = "fslash" elif key == " ": - key = 'space' + key = "space" if up_down: self._backend_obj.keyDown(key) else: self._backend_obj.keyUp(key) - def keys_type(self, text: list[str] | str, modifiers: list [str] = None) -> None: + def keys_type(self, text: list[str] | str, modifiers: list[str] = None) -> None: """ Custom implementation of the base method. @@ -851,13 +975,13 @@ def keys_type(self, text: list[str] | str, modifiers: list [str] = None) -> None for part in text: for char in str(part): if char == "\\": - char = 'bslash' + char = "bslash" elif char == "/": - char = 'fslash' + char = "fslash" elif char == " ": - char = 'space' + char = "space" elif char == "\n": - char = 'return' + char = "return" time.sleep(GlobalConfig.delay_between_keys) self._backend_obj.keyPress(char) @@ -887,20 +1011,25 @@ def get_mouse_location(self) -> Location: """ x, y = self._backend_obj.position() return Location(x, y) + mouse_location = property(fget=get_mouse_location) - def __configure_backend(self, backend: str = None, category: str = "pyautogui", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "pyautogui", reset: bool = False + ) -> None: if category != "pyautogui": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(PyAutoGUIController, self).configure_backend("pyautogui", reset=True) self.params[category] = {} self.params[category]["backend"] = "none" - def configure_backend(self, backend: str = None, category: str = "pyautogui", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "pyautogui", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -908,16 +1037,24 @@ def configure_backend(self, backend: str = None, category: str = "pyautogui", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "pyautogui", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "pyautogui", reset: bool = False + ) -> None: if category != "pyautogui": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: - super(PyAutoGUIController, self).synchronize_backend("pyautogui", reset=True) + super(PyAutoGUIController, self).synchronize_backend( + "pyautogui", reset=True + ) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) import pyautogui + # allow for (0,0) and edge coordinates pyautogui.FAILSAFE = False self._backend_obj = pyautogui @@ -928,8 +1065,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "pyautogui" self._modmap = inputmap.PyAutoGUIKeyModifier() self._mousemap = inputmap.PyAutoGUIMouseButton() - def synchronize_backend(self, backend: str = None, category: str = "pyautogui", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "pyautogui", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -960,8 +1098,9 @@ def mouse_move(self, location: Location, smooth: bool = True) -> None: self._backend_obj.moveTo(location.x, location.y) self._pointer = location - def mouse_click(self, button: int = None, count: int = 1, - modifiers: list[str] = None) -> None: + def mouse_click( + self, button: int = None, count: int = 1, modifiers: list[str] = None + ) -> None: """ Custom implementation of the base method. @@ -1010,7 +1149,7 @@ def mouse_scroll(self, clicks: int = 10, horizontal: bool = False) -> None: else: self._backend_obj.scroll(clicks) - def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: + def keys_toggle(self, keys: list[str] | str, up_down: bool) -> None: """ Custom implementation of the base method. diff --git a/guibot/desktopcontrol.py b/guibot/desktopcontrol.py index 70c85288..0e41e286 100644 --- a/guibot/desktopcontrol.py +++ b/guibot/desktopcontrol.py @@ -30,8 +30,9 @@ from .controller import * -logging.getLogger("guibot.desktopcontrol")\ - .warn("The `desktopcontrol` module is deprecated, use `controller` instead.") +logging.getLogger("guibot.desktopcontrol").warn( + "The `desktopcontrol` module is deprecated, use `controller` instead." 
+) DesktopControl = Controller diff --git a/guibot/errors.py b/guibot/errors.py index bf7adb0c..391a9617 100644 --- a/guibot/errors.py +++ b/guibot/errors.py @@ -26,11 +26,17 @@ """ -__all__ = ['GuiBotError', 'FileNotFoundError', - 'IncompatibleTargetError', 'IncompatibleTargetFileError', - 'FindError', 'NotFindError', - 'UnsupportedBackendError', 'MissingHotmapError', - 'UninitializedBackendError'] +__all__ = [ + "GuiBotError", + "FileNotFoundError", + "IncompatibleTargetError", + "IncompatibleTargetFileError", + "FindError", + "NotFindError", + "UnsupportedBackendError", + "MissingHotmapError", + "UninitializedBackendError", +] class GuiBotError(Exception): @@ -75,7 +81,10 @@ def __init__(self, failed_target: "Target" = None) -> None: :param failed_target: the target that was found """ if failed_target: - message = "The target %s was found on the screen while it was not expected" % failed_target + message = ( + "The target %s was found on the screen while it was not expected" + % failed_target + ) else: message = "The target was found on the screen while it was not expected" super(NotFindError, self).__init__(message) diff --git a/guibot/fileresolver.py b/guibot/fileresolver.py index d54d2316..0549e351 100644 --- a/guibot/fileresolver.py +++ b/guibot/fileresolver.py @@ -32,7 +32,7 @@ import logging -log = logging.getLogger('guibot.path') +log = logging.getLogger("guibot.path") class FileResolver(object): @@ -78,7 +78,9 @@ def clear(self) -> None: # empty list but keep reference del FileResolver._target_paths[:] - def search(self, filename: str, restriction: str = "", silent: bool = False) -> str | None: + def search( + self, filename: str, restriction: str = "", silent: bool = False + ) -> str | None: """ Search for a filename in the currently accessible paths. @@ -97,32 +99,32 @@ def search(self, filename: str, restriction: str = "", silent: bool = False) -> return fullname # Check with .png extension for images - fullname = os.path.join(directory, filename + '.png') + fullname = os.path.join(directory, filename + ".png") if os.path.exists(fullname): return fullname # Check with .xml extension for cascade - fullname = os.path.join(directory, filename + '.xml') + fullname = os.path.join(directory, filename + ".xml") if os.path.exists(fullname): return fullname # Check with .txt extension for text - fullname = os.path.join(directory, filename + '.txt') + fullname = os.path.join(directory, filename + ".txt") if os.path.exists(fullname): return fullname # Check with .csv extension for patterns - fullname = os.path.join(directory, filename + '.csv') + fullname = os.path.join(directory, filename + ".csv") if os.path.exists(fullname): return fullname # Check with .steps extension for chains - fullname = os.path.join(directory, filename + '.steps') + fullname = os.path.join(directory, filename + ".steps") if os.path.exists(fullname): return fullname if not silent: - raise FileNotFoundError('File ' + filename + ' not found') + raise FileNotFoundError("File " + filename + " not found") return None @@ -170,7 +172,10 @@ def __enter__(self) -> FileResolver: file_resolver.add_path(p) return file_resolver - def __exit__(self, *args: tuple[type, ...],) -> None: + def __exit__( + self, + *args: tuple[type, ...], + ) -> None: """ Exit this context and restore the original paths. 
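To illustrate the resolution order touched by these quoting changes, a small sketch; `add_path` is assumed to belong to the same class (it falls outside the hunks shown) and the directory name is made up:

```python
# Sketch of file resolution: a bare name is tried as-is, then with the known extensions.
from guibot.fileresolver import FileResolver

resolver = FileResolver()
resolver.add_path("images")  # assumed helper of the same class, not shown above

button = resolver.search("button")                 # e.g. resolves to images/button.png
missing = resolver.search("nothere", silent=True)  # returns None instead of raising
```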
diff --git a/guibot/finder.py b/guibot/finder.py
index 26025e75..c4c9c58e 100644
--- a/guibot/finder.py
+++ b/guibot/finder.py
@@ -42,22 +42,38 @@ from .location import Location
 import logging
 
-log = logging.getLogger('guibot.finder')
+log = logging.getLogger("guibot.finder")
 
-__all__ = ['CVParameter', 'Finder', 'AutoPyFinder', 'ContourFinder', 'TemplateFinder',
-           'FeatureFinder', 'CascadeFinder', 'TextFinder', 'TemplateFeatureFinder',
-           'DeepFinder', 'HybridFinder']
+
+__all__ = [
+    "CVParameter",
+    "Finder",
+    "AutoPyFinder",
+    "ContourFinder",
+    "TemplateFinder",
+    "FeatureFinder",
+    "CascadeFinder",
+    "TextFinder",
+    "TemplateFeatureFinder",
+    "DeepFinder",
+    "HybridFinder",
+]
 
 
 class CVParameter(object):
     """A class for a single parameter used for CV backend configuration."""
 
-    def __init__(self, value: bool | int | float | str | None,
-                 min_val: type["value"] = None,
-                 max_val: type["value"] = None,
-                 delta: float = 10.0, tolerance: float = 1.0,
-                 fixed: bool = True, enumerated: bool = False) -> None:
+    def __init__(
+        self,
+        value: bool | int | float | str | None,
+        min_val: type["value"] = None,
+        max_val: type["value"] = None,
+        delta: float = 10.0,
+        tolerance: float = 1.0,
+        fixed: bool = True,
+        enumerated: bool = False,
+    ) -> None:
         """
         Build a computer vision parameter.
 
@@ -105,7 +121,9 @@ def __init__(self, value: bool | int | float | str | None,
         # enumerable (e.g. modes) or range value
         self.enumerated = enumerated
         if self.enumerated and (self.min_val is None or self.max_val is None):
-            raise ValueError("Enumerated parameters must have a finite (usually small) range")
+            raise ValueError(
+                "Enumerated parameters must have a finite (usually small) range"
+            )
 
     def __repr__(self) -> str:
         """
@@ -113,8 +131,18 @@ def __repr__(self) -> str:
 
         :returns: special syntax representation of the parameter
         """
-        return ("<value='%s' min='%s' max='%s' delta='%s' tolerance='%s' fixed='%s' enumerated='%s'>"
-                % (self.value, self.min_val, self.max_val, self.delta, self.tolerance, self.fixed, self.enumerated))
+        return (
+            "<value='%s' min='%s' max='%s' delta='%s' tolerance='%s' fixed='%s' enumerated='%s'>"
+            % (
+                self.value,
+                self.min_val,
+                self.max_val,
+                self.delta,
+                self.tolerance,
+                self.fixed,
+                self.enumerated,
+            )
+        )
 
     def __eq__(self, other: "CVParameter") -> bool:
         """
@@ -136,9 +164,11 @@ def from_string(raw: str) -> "CVParameter":
         :raises: :py:class:`ValueError` if unsupported type is encountered
         """
         args = []
-        string_args = re.match(r"<value='(.+)' min='([\d.None-]+)' max='([\d.None-]+)' delta='([\d.]+)' tolerance='([\d.]+)' fixed='(\w+)'",
-                               raw).group(1, 2, 3, 4, 5, 6)
+        string_args = re.match(
+            r"<value='(.+)' min='([\d.None-]+)' max='([\d.None-]+)' delta='([\d.]+)' tolerance='([\d.]+)' fixed='(\w+)'",
+            raw,
+        ).group(1, 2, 3, 4, 5, 6)
 
         for arg in string_args:
             if arg == "None":
@@ -160,8 +190,11 @@ def from_string(raw: str) -> "CVParameter":
         log.log(9, "%s", args)
         return CVParameter(*args)
 
-    def random_value(self, mu: bool | int | float | str = None,
-                     sigma: bool | int | float | str = None) -> bool | int | float | str | None:
+    def random_value(
+        self,
+        mu: bool | int | float | str = None,
+        sigma: bool | int | float | str = None,
+    ) -> bool | int | float | str | None:
         """
         Return a random value of the CV parameter given its range and type.
@@ -177,21 +210,23 @@ def random_value(self, mu: bool | int | float | str = None, if mu is None or self.enumerated: return random.uniform(self.range[0], self.range[1]) elif sigma is None: - return min(max(random.gauss(mu, (start-end)/4), start), end) + return min(max(random.gauss(mu, (start - end) / 4), start), end) else: return min(max(random.gauss(mu, sigma), start), end) elif isinstance(self.value, int): if mu is None or self.enumerated: return random.randint(start, end) elif sigma is None: - return min(max(int(random.gauss(mu, (start-end)/4)), start), end) + return min(max(int(random.gauss(mu, (start - end) / 4)), start), end) else: return min(max(int(random.gauss(mu, sigma)), start), end) elif isinstance(self.value, bool): value = random.randint(0, 1) return value == 1 else: - log.warning("Cannot generate random value for CV parameters other than float, int, and bool") + log.warning( + "Cannot generate random value for CV parameters other than float, int, and bool" + ) return self.value @@ -234,7 +269,7 @@ def from_match_file(filename: str) -> "Finder": if not parser.has_section("find"): raise IOError("No image matching configuration can be found") try: - backend_name = parser.get("find", 'backend') + backend_name = parser.get("find", "backend") except config.NoOptionError: backend_name = GlobalConfig.find_backend @@ -261,9 +296,11 @@ def from_match_file(filename: str) -> "Finder": for category in finder.params.keys(): if parser.has_section(category): - section_backend = parser.get(category, 'backend') + section_backend = parser.get(category, "backend") if section_backend != finder.params[category]["backend"]: - finder.configure_backend(backend=section_backend, category=category, reset=False) + finder.configure_backend( + backend=section_backend, category=category, reset=False + ) for option in parser.options(category): if option == "backend": continue @@ -294,14 +331,14 @@ def to_match_file(finder: "Finder", filename: str) -> None: for section in sections: if not parser.has_section(section): parser.add_section(section) - parser.set(section, 'backend', finder.params[section]["backend"]) + parser.set(section, "backend", finder.params[section]["backend"]) for option in finder.params[section]: log.log(9, "%s %s", section, option) parser.set(section, option, finder.params[section][option]) if not filename.endswith(".match"): filename += ".match" - with open(filename, 'w') as configfile: + with open(filename, "w") as configfile: configfile.write("# IMAGE MATCH DATA\n") parser.write(configfile) @@ -311,8 +348,17 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: # available and currently fully compatible methods self.categories["find"] = "find_methods" - self.algorithms["find_methods"] = ["autopy", "contour", "template", "feature", - "cascade", "text", "tempfeat", "deep", "hybrid"] + self.algorithms["find_methods"] = [ + "autopy", + "contour", + "template", + "feature", + "cascade", + "text", + "tempfeat", + "deep", + "hybrid", + ] # other attributes self.imglog = ImageLogger() @@ -322,17 +368,22 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if configure: self.__configure_backend(reset=True) - def __configure_backend(self, backend: str = None, category: str = "find", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "find", reset: bool = False + ) -> None: if category != "find": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise 
UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(Finder, self).configure_backend(backend="cv", reset=True) if backend is None: backend = GlobalConfig.find_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} @@ -340,8 +391,9 @@ def __configure_backend(self, backend: str = None, category: str = "find", self.params[category]["similarity"] = CVParameter(0.75, 0.0, 1.0) log.log(9, "%s %s\n", category, self.params[category]) - def configure_backend(self, backend: str = None, category: str = "find", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "find", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -349,18 +401,24 @@ def configure_backend(self, backend: str = None, category: str = "find", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "find", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "find", reset: bool = False + ) -> None: if category != "find": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(Finder, self).synchronize_backend("cv", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) backend = self.params[category]["backend"] - def synchronize_backend(self, backend: str = None, category: str = "find", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "find", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -379,8 +437,10 @@ def can_calibrate(self, category: str, mark: bool) -> None: supported backend categories """ if category not in self.categories.keys(): - raise UnsupportedBackendError("Category '%s' not among the " - "supported %s" % (category, self.categories.keys())) + raise UnsupportedBackendError( + "Category '%s' not among the " + "supported %s" % (category, self.categories.keys()) + ) for key, value in self.params[category].items(): if not isinstance(value, CVParameter): @@ -394,7 +454,9 @@ def can_calibrate(self, category: str, mark: bool) -> None: value.fixed = True else: value.fixed = not mark - log.debug("Setting %s/%s to fixed=%s for calibration", category, key, value.fixed) + log.debug( + "Setting %s/%s to fixed=%s for calibration", category, key, value.fixed + ) def copy(self) -> "Finder": """ @@ -412,7 +474,9 @@ def copy(self) -> "Finder": for category in self.params.keys(): for param in self.params[category].keys(): - acopy.params[category][param] = copy.deepcopy(self.params[category][param]) + acopy.params[category][param] = copy.deepcopy( + self.params[category][param] + ) for category in self.params.keys(): try: @@ -432,7 +496,9 @@ def find(self, needle: "Target | list[Target]", haystack: "Image") -> "list[Matc :returns: all found matches (one in most use cases) :raises: :py:class:`NotImplementedError` if the base class method is called """ - raise NotImplementedError("Abstract method call - call implementation of this class") + raise NotImplementedError( + "Abstract method call - call implementation of this class" + ) def log(self, lvl: int) -> None: """ @@ -449,9 +515,13 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) - similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + similarity = ( + self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + ) name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity) self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1]) @@ -473,18 +543,22 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if configure: self.__configure_backend(reset=True) - def __configure_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: if category != "autopy": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(AutoPyFinder, self).configure_backend(backend="autopy", reset=True) self.params[category] = {} self.params[category]["backend"] = "none" - def configure_backend(self, backend: str = None, category: str = "autopy", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "autopy", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -525,13 +599,15 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": self._bitmapcache[needle.filename] = autopy_needle # TODO: Use in-memory conversion - with NamedTemporaryFile(prefix='guibot', suffix='.png') as f: + with NamedTemporaryFile(prefix="guibot", suffix=".png") as f: haystack.save(f.name) autopy_screenshot = bitmap.Bitmap.open(f.name) autopy_tolerance = 1.0 - self.params["find"]["similarity"].value - log.debug("Performing autopy template matching with tolerance %s (color)", - autopy_tolerance) + log.debug( + "Performing autopy template matching with tolerance %s (color)", + autopy_tolerance, + ) # TODO: since only the coordinates are available and fuzzy areas of # matches are returned we need to ask autopy team for returning @@ -548,10 +624,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": w, h = needle.width, needle.height dx, dy = needle.center_offset.x, needle.center_offset.y from .match import Match + matches = [Match(x, y, w, h, dx, dy, similarity)] from PIL import ImageDraw + draw = ImageDraw.Draw(self.imglog.hotmaps[-1]) - draw.rectangle((x, y, x+w, y+h), outline=(0, 0, 255)) + draw.rectangle((x, y, x + w, y + h), outline=(0, 0, 255)) del draw else: matches = [] @@ -582,15 +660,18 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if configure: self.__configure(reset=True) - def __configure_backend(self, backend: str = None, category: str = "contour", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "contour", reset: bool = False + ) -> None: """ Custom implementation of the base method. See base method for details. """ if category not in ["contour", "threshold"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(ContourFinder, self).configure_backend("contour", reset=True) if category == "contour" and backend is None: @@ -598,8 +679,10 @@ def __configure_backend(self, backend: str = None, category: str = "contour", elif category == "threshold" and backend is None: backend = GlobalConfig.contour_threshold_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} @@ -607,12 +690,18 @@ def __configure_backend(self, backend: str = None, category: str = "contour", if category == "contour": # 1 RETR_EXTERNAL, 2 RETR_LIST, 3 RETR_CCOMP, 4 RETR_TREE - self.params[category]["retrievalMode"] = CVParameter(2, 1, 4, enumerated=True) + self.params[category]["retrievalMode"] = CVParameter( + 2, 1, 4, enumerated=True + ) # 1 CHAIN_APPROX_NONE, 2 CHAIN_APPROX_SIMPLE, 3 CHAIN_APPROX_TC89_L1, 4 CHAIN_APPROX_TC89_KCOS - self.params[category]["approxMethod"] = CVParameter(2, 1, 4, enumerated=True) + self.params[category]["approxMethod"] = CVParameter( + 2, 1, 4, enumerated=True + ) self.params[category]["minArea"] = CVParameter(0, 0, None, 100.0) # 1 L1 method, 2 L2 method, 3 L3 method - self.params[category]["contoursMatch"] = CVParameter(1, 1, 3, enumerated=True) + self.params[category]["contoursMatch"] = CVParameter( + 1, 1, 3, 
enumerated=True + ) elif category == "threshold": # 1 normal, 2 median, 3 gaussian, 4 none self.params[category]["blurType"] = CVParameter(4, 1, 4, enumerated=True) @@ -623,23 +712,36 @@ def __configure_backend(self, backend: str = None, category: str = "contour", self.params[category]["thresholdValue"] = CVParameter(122, 0, 255, 50.0) self.params[category]["thresholdMax"] = CVParameter(255, 0, 255, 20.0) # 0 binary, 1 binar_inv, 2 trunc, 3 tozero, 4 tozero_inv, 5 mask, 6 otsu, 7 triangle - self.params[category]["thresholdType"] = CVParameter(1, 0, 7, enumerated=True) + self.params[category]["thresholdType"] = CVParameter( + 1, 0, 7, enumerated=True + ) elif backend == "adaptive": self.params[category]["thresholdMax"] = CVParameter(255, 0, 255, 20.0) # 0 adaptive mean threshold, 1 adaptive gaussian (weighted mean) threshold - self.params[category]["adaptiveMethod"] = CVParameter(1, 0, 1, enumerated=True) + self.params[category]["adaptiveMethod"] = CVParameter( + 1, 0, 1, enumerated=True + ) # 0 normal, 1 inverted - self.params[category]["thresholdType"] = CVParameter(1, 0, 1, enumerated=True) + self.params[category]["thresholdType"] = CVParameter( + 1, 0, 1, enumerated=True + ) # size of the neighborhood to consider to adaptive thresholding - self.params[category]["blockSize"] = CVParameter(11, 3, None, 200.0, 2.0) + self.params[category]["blockSize"] = CVParameter( + 11, 3, None, 200.0, 2.0 + ) # constant to substract from the (weighted) calculated mean self.params[category]["constant"] = CVParameter(2, -255, 255, 1.0) elif backend == "canny": - self.params[category]["threshold1"] = CVParameter(100.0, 0.0, None, 50.0) - self.params[category]["threshold2"] = CVParameter(1000.0, 0.0, None, 500.0) - - def configure_backend(self, backend: str = None, category: str = "contour", - reset: bool = False) -> None: + self.params[category]["threshold1"] = CVParameter( + 100.0, 0.0, None, 50.0 + ) + self.params[category]["threshold2"] = CVParameter( + 1000.0, 0.0, None, 500.0 + ) + + def configure_backend( + self, backend: str = None, category: str = "contour", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -647,11 +749,21 @@ def configure_backend(self, backend: str = None, category: str = "contour", """ self.__configure_backend(backend, category, reset) - def __configure(self, threshold_filter: str = None, reset: bool = True, **kwargs: dict[str, type]) -> None: + def __configure( + self, + threshold_filter: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: self.__configure_backend(category="contour", reset=reset) self.__configure_backend(threshold_filter, "threshold") - def configure(self, threshold_filter: str = None, reset: bool = True, **kwargs: dict[str, type]) -> None: + def configure( + self, + threshold_filter: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: """ Custom implementation of the base method. 
@@ -704,10 +816,13 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": for j, ncontour in enumerate(needle_contours): if cv2.contourArea(ncontour) < self.params["contour"]["minArea"].value: continue - distances[i, j] = cv2.matchShapes(hcontour, ncontour, self.params["contour"]["contoursMatch"].value, 0) + distances[i, j] = cv2.matchShapes( + hcontour, ncontour, self.params["contour"]["contoursMatch"].value, 0 + ) assert distances[i, j] >= 0.0 from .match import Match + matches = [] nx, ny, nw, nh = cv2.boundingRect(numpy.concatenate(needle_contours, axis=0)) while True: @@ -719,41 +834,78 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": # we don't allow collapsing into the same needle contour, i.e. # the map from the needle to the haystack contours is injective # -> so here we cross the entire row rather than one value in it - distances[index[0][0], :] = 1.1 # like this works even for similarity 0.0 + distances[index[0][0], :] = ( + 1.1 # like this works even for similarity 0.0 + ) matching_haystack_contours.append(haystack_contours[index[0][0]]) average_distance = numpy.average(matching_haystack_distances) required_distance = 1.0 - self.params["find"]["similarity"].value - logging.debug("Average distance to next needle shape is %s of max allowed %s", - average_distance, required_distance) + logging.debug( + "Average distance to next needle shape is %s of max allowed %s", + average_distance, + required_distance, + ) if average_distance > required_distance: break else: shape = numpy.concatenate(matching_haystack_contours, axis=0) x, y, w, h = cv2.boundingRect(shape) # calculate needle upleft and downright points to return its (0,0) location - needle_upleft = (max(int((x-nx)*float(w)/nw), 0), max(int((y-ny)*float(h)/nh), 0)) - needle_downright = (min(int(needle_upleft[0]+needle.width*float(w)/nw), haystack.width), - min(int(needle_upleft[1]+needle.height*float(h)/nh), haystack.height)) - needle_center_offset = (needle.center_offset.x*float(w)/nw, - needle.center_offset.y*float(h)/nh) - cv2.rectangle(self.imglog.hotmaps[-1], needle_upleft, needle_downright, (0, 0, 0), 2) - cv2.rectangle(self.imglog.hotmaps[-1], needle_upleft, needle_downright, (255, 255, 255), 1) + needle_upleft = ( + max(int((x - nx) * float(w) / nw), 0), + max(int((y - ny) * float(h) / nh), 0), + ) + needle_downright = ( + min( + int(needle_upleft[0] + needle.width * float(w) / nw), + haystack.width, + ), + min( + int(needle_upleft[1] + needle.height * float(h) / nh), + haystack.height, + ), + ) + needle_center_offset = ( + needle.center_offset.x * float(w) / nw, + needle.center_offset.y * float(h) / nh, + ) + cv2.rectangle( + self.imglog.hotmaps[-1], + needle_upleft, + needle_downright, + (0, 0, 0), + 2, + ) + cv2.rectangle( + self.imglog.hotmaps[-1], + needle_upleft, + needle_downright, + (255, 255, 255), + 1, + ) # NOTE: to extract the region of interest just do: # roi = thresh_haystack[y:y+h,x:x+w] similarity = 1.0 - average_distance self.imglog.similarities.append(similarity) self.imglog.locations.append(needle_upleft) - matches.append(Match(needle_upleft[0], needle_upleft[1], - needle_downright[0] - needle_upleft[0], - needle_downright[1] - needle_upleft[1], - needle_center_offset[0], needle_center_offset[1], - similarity)) + matches.append( + Match( + needle_upleft[0], + needle_upleft[1], + needle_downright[0] - needle_upleft[0], + needle_downright[1] - needle_upleft[1], + needle_center_offset[0], + needle_center_offset[1], + similarity, + ) + ) self.imglog.log(30) return 
matches def _binarize_image(self, image: "Matlike", log: bool = False) -> "Matlike": import cv2 + # blur first in order to avoid unwonted edges caused from noise blurSize = self.params["threshold"]["blurKernelSize"].value blurDeviation = self.params["threshold"]["blurKernelSigma"].value @@ -763,37 +915,50 @@ def _binarize_image(self, image: "Matlike", log: bool = False) -> "Matlike": elif self.params["threshold"]["blurType"].value == 2: blur_image = cv2.medianBlur(gray_image, blurSize) elif self.params["threshold"]["blurType"].value == 3: - blur_image = cv2.GaussianBlur(gray_image, (blurSize, blurSize), blurDeviation) + blur_image = cv2.GaussianBlur( + gray_image, (blurSize, blurSize), blurDeviation + ) elif self.params["threshold"]["blurType"].value == 4: blur_image = gray_image # second stage: thresholding if self.params["threshold"]["backend"] == "normal": - _, thresh_image = cv2.threshold(blur_image, - self.params["threshold"]["thresholdValue"].value, - self.params["threshold"]["thresholdMax"].value, - self.params["threshold"]["thresholdType"].value) + _, thresh_image = cv2.threshold( + blur_image, + self.params["threshold"]["thresholdValue"].value, + self.params["threshold"]["thresholdMax"].value, + self.params["threshold"]["thresholdType"].value, + ) elif self.params["threshold"]["backend"] == "adaptive": - thresh_image = cv2.adaptiveThreshold(blur_image, - self.params["threshold"]["thresholdMax"].value, - self.params["threshold"]["adaptiveMethod"].value, - self.params["threshold"]["thresholdType"].value, - self.params["threshold"]["blockSize"].value, - self.params["threshold"]["constant"].value) + thresh_image = cv2.adaptiveThreshold( + blur_image, + self.params["threshold"]["thresholdMax"].value, + self.params["threshold"]["adaptiveMethod"].value, + self.params["threshold"]["thresholdType"].value, + self.params["threshold"]["blockSize"].value, + self.params["threshold"]["constant"].value, + ) elif self.params["threshold"]["backend"] == "canny": - thresh_image = cv2.Canny(blur_image, - self.params["threshold"]["threshold1"].value, - self.params["threshold"]["threshold2"].value) + thresh_image = cv2.Canny( + blur_image, + self.params["threshold"]["threshold1"].value, + self.params["threshold"]["threshold2"].value, + ) if log: self.imglog.hotmaps.append(thresh_image) return thresh_image - def _extract_contours(self, countours_image: "Matlike", log: bool = False) -> "list[Matlike]": + def _extract_contours( + self, countours_image: "Matlike", log: bool = False + ) -> "list[Matlike]": import cv2 - rargs = cv2.findContours(countours_image, - self.params["contour"]["retrievalMode"].value, - self.params["contour"]["approxMethod"].value) + + rargs = cv2.findContours( + countours_image, + self.params["contour"]["retrievalMode"].value, + self.params["contour"]["approxMethod"].value, + ) if len(rargs) == 3: _, contours, hierarchy = rargs else: @@ -819,16 +984,26 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") - - self.imglog.dump_hotmap("imglog%s-3hotmap-1threshold.png" % self.imglog.printable_step, - self.imglog.hotmaps[0]) - self.imglog.dump_hotmap("imglog%s-3hotmap-2contours.png" % self.imglog.printable_step, - self.imglog.hotmaps[1]) - - similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 - self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), - self.imglog.hotmaps[-1]) + raise 
MissingHotmapError( + "No matching was performed in order to be image logged" + ) + + self.imglog.dump_hotmap( + "imglog%s-3hotmap-1threshold.png" % self.imglog.printable_step, + self.imglog.hotmaps[0], + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-2contours.png" % self.imglog.printable_step, + self.imglog.hotmaps[1], + ) + + similarity = ( + self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), + self.imglog.hotmaps[-1], + ) self.imglog.clear() ImageLogger.step += 1 @@ -844,28 +1019,37 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: # available and currently fully compatible methods self.categories["template"] = "template_matchers" # we only use the normalized version of "sqdiff", "ccorr", and "ccoeff" - self.algorithms["template_matchers"] = ("sqdiff_normed", "ccorr_normed", "ccoeff_normed") + self.algorithms["template_matchers"] = ( + "sqdiff_normed", + "ccorr_normed", + "ccoeff_normed", + ) # additional preparation (no synchronization available) if configure: self.__configure_backend(reset=True) - def __configure_backend(self, backend: str = None, category: str = "template", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "template", reset: bool = False + ) -> None: """ Custom implementation of the base method. See base method for details. """ if category != "template": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(TemplateFinder, self).configure_backend("template", reset=True) if backend is None: backend = GlobalConfig.template_match_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} @@ -873,8 +1057,9 @@ def __configure_backend(self, backend: str = None, category: str = "template", self.params[category]["nocolor"] = CVParameter(False) log.log(9, "%s %s\n", category, self.params[category]) - def configure_backend(self, backend: str = None, category: str = "template", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "template", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -899,14 +1084,25 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": self.imglog.haystack = haystack self.imglog.dump_matched_images() - if self.params["template"]["backend"] not in self.algorithms["template_matchers"]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (self.params["template"]["backend"], - self.algorithms["template_matchers"])) + if ( + self.params["template"]["backend"] + not in self.algorithms["template_matchers"] + ): + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" + % ( + self.params["template"]["backend"], + self.algorithms["template_matchers"], + ) + ) match_template = self.params["template"]["backend"] no_color = self.params["template"]["nocolor"].value - log.debug("Performing %s template matching %s color", - match_template, "without" if no_color else "with") + log.debug( + "Performing %s template matching %s color", + match_template, + "without" if no_color else "with", + ) result = self._match_template(needle, haystack, no_color, match_template) if result is None: log.warning("OpenCV's template matching returned no result") @@ -917,6 +1113,7 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": import cv2 import numpy + universal_hotmap = result * 255.0 final_hotmap = numpy.array(self.imglog.haystack.pil_image) if self.params["template"]["nocolor"].value: @@ -925,21 +1122,31 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": # extract maxima once for each needle size region similarity = self.params["find"]["similarity"].value from .match import Match + matches = [] while True: minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result) # rectify to the [0,1] interval to avoid negative values in some methods maxVal = min(max(maxVal, 0.0), 1.0) - log.debug('Next best match with value %s (similarity %s) and location (x,y) %s', - str(maxVal), similarity, str(maxLoc)) + log.debug( + "Next best match with value %s (similarity %s) and location (x,y) %s", + str(maxVal), + similarity, + str(maxLoc), + ) if maxVal < similarity: if len(matches) == 0: self.imglog.similarities.append(maxVal) self.imglog.locations.append(maxLoc) current_hotmap = numpy.copy(universal_hotmap) - cv2.circle(current_hotmap, (maxLoc[0], maxLoc[1]), int(30*maxVal), (255, 255, 255)) + cv2.circle( + current_hotmap, + (maxLoc[0], maxLoc[1]), + int(30 * maxVal), + (255, 255, 255), + ) self.imglog.hotmaps.append(current_hotmap) self.imglog.hotmaps.append(final_hotmap) @@ -949,12 +1156,17 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": self.imglog.similarities.append(maxVal) self.imglog.locations.append(maxLoc) current_hotmap = numpy.copy(universal_hotmap) - cv2.circle(current_hotmap, (maxLoc[0], maxLoc[1]), int(30*maxVal), (255, 255, 255)) + cv2.circle( + current_hotmap, + (maxLoc[0], maxLoc[1]), + int(30 * maxVal), + (255, 255, 255), + ) x, y = maxLoc w, h = needle.width, needle.height dx, dy = needle.center_offset.x, needle.center_offset.y - cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (255, 255, 255), 1) + cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (255, 255, 255), 1) self.imglog.hotmaps.append(current_hotmap) log.debug("Next best match is acceptable") matches.append(Match(x, y, w, h, dx, dy, maxVal)) @@ -970,10 +1182,22 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": 
match_y1 = min(maxLoc[1] + int(0.5 * needle.height), res_h) # log this only if performing deep internal debugging - log.log(9, "Wipe image matches in x [%s, %s]/[%s, %s]", - match_x0, match_x1, 0, res_w) - log.log(9, "Wipe image matches in y [%s, %s]/[%s, %s]", - match_y0, match_y1, 0, res_h) + log.log( + 9, + "Wipe image matches in x [%s, %s]/[%s, %s]", + match_x0, + match_x1, + 0, + res_w, + ) + log.log( + 9, + "Wipe image matches in y [%s, %s]/[%s, %s]", + match_y0, + match_y1, + 0, + res_h, + ) # clean found image to look for next safe distance match result[match_y0:match_y1, match_x0:match_x1] = 0.0 @@ -985,8 +1209,9 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": return matches - def _match_template(self, needle: "Image", haystack: "Image", nocolor: str, - method: str) -> "Matlike | None": + def _match_template( + self, needle: "Image", haystack: "Image", nocolor: str, method: str + ) -> "Matlike | None": """ EXTRA DOCSTRING: Template matching backend - wrapper. @@ -995,15 +1220,26 @@ def _match_template(self, needle: "Image", haystack: "Image", nocolor: str, """ # sanity check: needle size must be smaller than haystack if haystack.width < needle.width or haystack.height < needle.height: - log.warning("The size of the searched image (%sx%s) does not fit the search region (%sx%s)", - needle.width, needle.height, haystack.width, haystack.height) + log.warning( + "The size of the searched image (%sx%s) does not fit the search region (%sx%s)", + needle.width, + needle.height, + haystack.width, + haystack.height, + ) return None import cv2 import numpy - methods = {"sqdiff": cv2.TM_SQDIFF, "sqdiff_normed": cv2.TM_SQDIFF_NORMED, - "ccorr": cv2.TM_CCORR, "ccorr_normed": cv2.TM_CCORR_NORMED, - "ccoeff": cv2.TM_CCOEFF, "ccoeff_normed": cv2.TM_CCOEFF_NORMED} + + methods = { + "sqdiff": cv2.TM_SQDIFF, + "sqdiff_normed": cv2.TM_SQDIFF_NORMED, + "ccorr": cv2.TM_CCORR, + "ccorr_normed": cv2.TM_CCORR_NORMED, + "ccoeff": cv2.TM_CCOEFF, + "ccoeff_normed": cv2.TM_CCOEFF_NORMED, + } if method not in methods.keys(): raise UnsupportedBackendError("Supported algorithms are in conflict") @@ -1033,16 +1269,25 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) for i in range(len(self.imglog.similarities)): - name = "imglog%s-3hotmap-%stemplate-%s.png" % (self.imglog.printable_step, - i + 1, self.imglog.similarities[i]) + name = "imglog%s-3hotmap-%stemplate-%s.png" % ( + self.imglog.printable_step, + i + 1, + self.imglog.similarities[i], + ) self.imglog.dump_hotmap(name, self.imglog.hotmaps[i]) - similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 - self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), - self.imglog.hotmaps[-1]) + similarity = ( + self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), + self.imglog.hotmaps[-1], + ) self.imglog.clear() ImageLogger.step += 1 @@ -1066,11 +1311,23 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: self.categories["fextract"] = "feature_extractors" self.categories["fmatch"] = "feature_matchers" self.algorithms["feature_projectors"] = ("mixed",) - self.algorithms["feature_matchers"] = ("BruteForce", 
"BruteForce-L1", "BruteForce-Hamming", - "BruteForce-Hamming(2)") - self.algorithms["feature_detectors"] = ("ORB", "BRISK", "KAZE", "AKAZE", "MSER", - "AgastFeatureDetector", "FastFeatureDetector", "GFTTDetector", - "SimpleBlobDetector") + self.algorithms["feature_matchers"] = ( + "BruteForce", + "BruteForce-L1", + "BruteForce-Hamming", + "BruteForce-Hamming(2)", + ) + self.algorithms["feature_detectors"] = ( + "ORB", + "BRISK", + "KAZE", + "AKAZE", + "MSER", + "AgastFeatureDetector", + "FastFeatureDetector", + "GFTTDetector", + "SimpleBlobDetector", + ) # TODO: we could also support "StereoSGBM" but it needs initialization arguments # BUG: "KAZE", "AKAZE" we get internal error when using KAZE/AKAZE even though it should be possible self.algorithms["feature_extractors"] = ("ORB", "BRISK") @@ -1086,10 +1343,13 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: self.__synchronize(reset=False) - def __configure_backend(self, backend: str = None, category: str = "feature", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "feature", reset: bool = False + ) -> None: if category not in ["feature", "fdetect", "fextract", "fmatch"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(FeatureFinder, self).configure_backend("feature", reset=True) if category == "feature" and backend is None: @@ -1101,8 +1361,10 @@ def __configure_backend(self, backend: str = None, category: str = "feature", elif category == "fmatch" and backend is None: backend = GlobalConfig.feature_match_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} @@ -1110,22 +1372,30 @@ def __configure_backend(self, backend: str = None, category: str = "feature", if category == "feature": # 0 for homography, 1 for fundamental matrix - self.params[category]["projectionMethod"] = CVParameter(0, 0, 1, enumerated=True) - self.params[category]["ransacReprojThreshold"] = CVParameter(0.0, 0.0, 200.0, 50.0) + self.params[category]["projectionMethod"] = CVParameter( + 0, 0, 1, enumerated=True + ) + self.params[category]["ransacReprojThreshold"] = CVParameter( + 0.0, 0.0, 200.0, 50.0 + ) self.params[category]["minDetectedFeatures"] = CVParameter(4, 1, None) self.params[category]["minMatchedFeatures"] = CVParameter(4, 1, None) # 0 for matched/detected ratio, 1 for projected/matched ratio - self.params[category]["similarityRatio"] = CVParameter(1, 0, 1, enumerated=True) + self.params[category]["similarityRatio"] = CVParameter( + 1, 0, 1, enumerated=True + ) elif category == "fdetect": self.params[category]["nzoom"] = CVParameter(1.0, 1.0, 10.0, 2.5) self.params[category]["hzoom"] = CVParameter(1.0, 1.0, 10.0, 2.5) import cv2 + feature_detector_create = getattr(cv2, "%s_create" % backend) backend_obj = feature_detector_create() elif category == "fextract": import cv2 + descriptor_extractor_create = getattr(cv2, "%s_create" % backend) backend_obj = descriptor_extractor_create() @@ -1134,10 +1404,14 @@ def 
__configure_backend(self, backend: str = None, category: str = "feature", self.params[category]["refinements"] = CVParameter(50, 1, None) self.params[category]["recalc_interval"] = CVParameter(10, 1, None) self.params[category]["variants_k"] = CVParameter(100, 1, None) - self.params[category]["variants_ratio"] = CVParameter(0.33, 0.0001, 1.0, 0.25) + self.params[category]["variants_ratio"] = CVParameter( + 0.33, 0.0001, 1.0, 0.25 + ) return else: - self.params[category]["ratioThreshold"] = CVParameter(0.65, 0.0, 1.0, 0.25, 0.01) + self.params[category]["ratioThreshold"] = CVParameter( + 0.65, 0.0, 1.0, 0.25, 0.01 + ) self.params[category]["ratioTest"] = CVParameter(False) self.params[category]["symmetryTest"] = CVParameter(False) @@ -1147,6 +1421,7 @@ def __configure_backend(self, backend: str = None, category: str = "feature", else: import cv2 + # NOTE: descriptor matcher creation is kept the old way while feature # detection and extraction not - example of the untidy maintenance of OpenCV backend_obj = cv2.DescriptorMatcher_create(backend) @@ -1176,7 +1451,9 @@ def __configure_backend(self, backend: str = None, category: str = "feature", elif category in ("fdetect", "fextract") and param == "WTA_K": self.params[category][param] = CVParameter(val, 2, 4, 1.0) elif category in ("fdetect", "fextract") and param == "ScaleFactor": - self.params[category][param] = CVParameter(val, 1.01, 2.0, 0.25, 0.05) + self.params[category][param] = CVParameter( + val, 1.01, 2.0, 0.25, 0.05 + ) elif category in ("fdetect", "fextract") and param == "NLevels": self.params[category][param] = CVParameter(val, 1, 100, 25, 0.5) elif category in ("fdetect", "fextract") and param == "NLevels": @@ -1187,8 +1464,9 @@ def __configure_backend(self, backend: str = None, category: str = "feature", self.params[category][param] = CVParameter(val) log.log(9, "%s=%s", param, val) - def configure_backend(self, backend: str = None, category: str = "feature", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "feature", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -1208,17 +1486,27 @@ def configure_backend(self, backend: str = None, category: str = "feature", """ self.__configure_backend(backend, category, reset) - def __configure(self, feature_detect: str = None, feature_extract: str = None, - feature_match: str = None, reset: bool = True, - **kwargs: dict[str, type]) -> None: + def __configure( + self, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: self.__configure_backend(category="feature", reset=reset) self.__configure_backend(feature_detect, "fdetect") self.__configure_backend(feature_extract, "fextract") self.__configure_backend(feature_match, "fmatch") - def configure(self, feature_detect: str = None, feature_extract: str = None, - feature_match: str = None, reset: bool = True, - **kwargs: dict[str, type]) -> None: + def configure( + self, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: """ Custom implementation of the base method. 
@@ -1229,14 +1517,19 @@ def configure(self, feature_detect: str = None, feature_extract: str = None, """ self.__configure(feature_detect, feature_extract, feature_match, reset) - def __synchronize_backend(self, backend: str = None, category: str = "feature", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "feature", reset: bool = False + ) -> None: if category not in ["feature", "fdetect", "fextract", "fmatch"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(FeatureFinder, self).synchronize_backend("feature", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) backend = self.params[category]["backend"] backend_obj = None @@ -1245,14 +1538,17 @@ def __synchronize_backend(self, backend: str = None, category: str = "feature", return elif category == "fdetect": import cv2 + feature_detector_create = getattr(cv2, "%s_create" % backend) backend_obj = feature_detector_create() elif category == "fextract": import cv2 + descriptor_extractor_create = getattr(cv2, "%s_create" % backend) backend_obj = descriptor_extractor_create() elif category == "fmatch": import cv2 + # NOTE: descriptor matcher creation is kept the old way while feature # detection and extraction not - example of the untidy maintenance of OpenCV backend_obj = cv2.DescriptorMatcher_create(backend) @@ -1284,8 +1580,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "feature", elif category == "fmatch": self.matcher = backend_obj - def synchronize_backend(self, backend: str = None, category: str = "feature", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "feature", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -1293,15 +1590,25 @@ def synchronize_backend(self, backend: str = None, category: str = "feature", """ self.__synchronize_backend(backend, category, reset) - def __synchronize(self, feature_detect: str = None, feature_extract: str = None, - feature_match: str = None, reset: bool = True) -> None: + def __synchronize( + self, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + ) -> None: self.__synchronize_backend(category="feature", reset=reset) self.__synchronize_backend(feature_detect, "fdetect") self.__synchronize_backend(feature_extract, "fextract") self.__synchronize_backend(feature_match, "fmatch") - def synchronize(self, feature_detect: str = None, feature_extract: str = None, - feature_match: str = None, reset: bool = True) -> None: + def synchronize( + self, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + ) -> None: """ Custom implementation of the base method. 
@@ -1334,6 +1641,7 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": import cv2 import numpy + ngray = cv2.cvtColor(numpy.array(needle.pil_image), cv2.COLOR_RGB2GRAY) hgray = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY) self.imglog.hotmaps.append(numpy.array(haystack.pil_image)) @@ -1343,14 +1651,21 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": # project more points for debugging purposes and image logging npoints = [] - npoints.extend([(0, 0), (needle.width, 0), (0, needle.height), - (needle.width, needle.height)]) + npoints.extend( + [ + (0, 0), + (needle.width, 0), + (0, needle.height), + (needle.width, needle.height), + ] + ) npoints.append((needle.width / 2, needle.height / 2)) similarity = self.params["find"]["similarity"].value hpoints = self._project_features(npoints, ngray, hgray, similarity) if hpoints is not None and len(hpoints) > 0: from .match import Match + x, y = hpoints[0] w, h = tuple(numpy.abs(numpy.subtract(hpoints[3], hpoints[0]))) # TODO: projecting offset requires more effort @@ -1360,8 +1675,13 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": self.imglog.log(40) return [] - def _project_features(self, locations_in_needle: list[tuple[int, int]], ngray: "Matlike", - hgray: "Matlike", similarity: float) -> list[tuple[int, int]] | None: + def _project_features( + self, + locations_in_needle: list[tuple[int, int]], + ngray: "Matlike", + hgray: "Matlike", + similarity: float, + ) -> list[tuple[int, int]] | None: """ EXTRA DOCSTRING: Feature matching backend - wrapper. @@ -1372,45 +1692,70 @@ def _project_features(self, locations_in_needle: list[tuple[int, int]], ngray: " self.imglog.locations.append((0, 0)) self.imglog.similarities.append(0.0) - log.debug("Performing %s feature matching (no color)", - "-".join([self.params["fdetect"]["backend"], - self.params["fextract"]["backend"], - self.params["fmatch"]["backend"]])) - nkp, ndc, hkp, hdc = self._detect_features(ngray, hgray, - self.params["fdetect"]["backend"], - self.params["fextract"]["backend"]) + log.debug( + "Performing %s feature matching (no color)", + "-".join( + [ + self.params["fdetect"]["backend"], + self.params["fextract"]["backend"], + self.params["fmatch"]["backend"], + ] + ), + ) + nkp, ndc, hkp, hdc = self._detect_features( + ngray, + hgray, + self.params["fdetect"]["backend"], + self.params["fextract"]["backend"], + ) min_features = self.params["feature"]["minDetectedFeatures"].value if len(nkp) < min_features or len(hkp) < min_features: - log.debug("No acceptable best match after feature detection: " - "only %s\\%s needle and %s\\%s haystack features detected", - len(nkp), min_features, len(hkp), min_features) + log.debug( + "No acceptable best match after feature detection: " + "only %s\\%s needle and %s\\%s haystack features detected", + len(nkp), + min_features, + len(hkp), + min_features, + ) return None - mnkp, mhkp = self._match_features(nkp, ndc, hkp, hdc, - self.params["fmatch"]["backend"]) + mnkp, mhkp = self._match_features( + nkp, ndc, hkp, hdc, self.params["fmatch"]["backend"] + ) min_features = self.params["feature"]["minMatchedFeatures"].value if self.imglog.similarities[-1] < similarity or len(mnkp) < min_features: - log.debug("No acceptable best match after feature matching:\n" - "- matched features %s of %s required\n" - "- best match similarity %s of %s required", - len(mnkp), min_features, - self.imglog.similarities[-1], similarity) + log.debug( + "No acceptable best match after feature 
matching:\n" + "- matched features %s of %s required\n" + "- best match similarity %s of %s required", + len(mnkp), + min_features, + self.imglog.similarities[-1], + similarity, + ) return None locations_in_haystack = self._project_locations(locations_in_needle, mnkp, mhkp) if self.imglog.similarities[-1] < similarity: - log.debug("No acceptable best match after RANSAC projection: " - "best match similarity %s is less than required %s", - self.imglog.similarities[-1], similarity) + log.debug( + "No acceptable best match after RANSAC projection: " + "best match similarity %s is less than required %s", + self.imglog.similarities[-1], + similarity, + ) return None else: - self._log_features(30, self.imglog.locations, self.imglog.hotmaps[-1], 3, 0, 0, 255) + self._log_features( + 30, self.imglog.locations, self.imglog.hotmaps[-1], 3, 0, 0, 255 + ) return locations_in_haystack - def _detect_features(self, ngray: int, hgray: int, detect: str, - extract: str) -> tuple[list[Any], list[Any], list[Any], list[Any]]: + def _detect_features( + self, ngray: int, hgray: int, detect: str, extract: str + ) -> tuple[list[Any], list[Any], list[Any], list[Any]]: """ EXTRA DOCSTRING: Feature matching backend - detection/extraction stage (1). @@ -1421,6 +1766,7 @@ def _detect_features(self, ngray: int, hgray: int, detect: str, # zoom in if explicitly set import cv2 + if nfactor > 1.0: log.debug("Zooming x%i needle", nfactor) ngray = cv2.resize(ngray, None, fx=nfactor, fy=nfactor) @@ -1429,8 +1775,10 @@ def _detect_features(self, ngray: int, hgray: int, detect: str, hgray = cv2.resize(hgray, None, fx=hfactor, fy=hfactor) # include only methods tested for compatibility - if (detect in self.algorithms["feature_detectors"] - and extract in self.algorithms["feature_extractors"]): + if ( + detect in self.algorithms["feature_detectors"] + and extract in self.algorithms["feature_extractors"] + ): self.synchronize_backend(category="fdetect") self.synchronize_backend(category="fextract") @@ -1443,32 +1791,47 @@ def _detect_features(self, ngray: int, hgray: int, detect: str, (hkeypoints, hdescriptors) = self.extractor.compute(hgray, hkeypoints) else: - raise UnsupportedBackendError("Feature detector %s is not among the supported" - "ones %s" % (detect, self.algorithms[self.categories["fdetect"]])) + raise UnsupportedBackendError( + "Feature detector %s is not among the supported" + "ones %s" % (detect, self.algorithms[self.categories["fdetect"]]) + ) # reduce keypoint coordinates to the original image size for nkeypoint in nkeypoints: - nkeypoint.pt = (int(nkeypoint.pt[0] / nfactor), - int(nkeypoint.pt[1] / nfactor)) + nkeypoint.pt = ( + int(nkeypoint.pt[0] / nfactor), + int(nkeypoint.pt[1] / nfactor), + ) for hkeypoint in hkeypoints: - hkeypoint.pt = (int(hkeypoint.pt[0] / hfactor), - int(hkeypoint.pt[1] / hfactor)) - - log.debug("Detected %s keypoints in needle and %s in haystack", - len(nkeypoints), len(hkeypoints)) + hkeypoint.pt = ( + int(hkeypoint.pt[0] / hfactor), + int(hkeypoint.pt[1] / hfactor), + ) + + log.debug( + "Detected %s keypoints in needle and %s in haystack", + len(nkeypoints), + len(hkeypoints), + ) hkp_locations = [hkp.pt for hkp in hkeypoints] self._log_features(10, hkp_locations, self.imglog.hotmaps[-4], 3, 255, 0, 0) return (nkeypoints, ndescriptors, hkeypoints, hdescriptors) - def _match_features(self, nkeypoints: str, ndescriptors: str, - hkeypoints: str, hdescriptors: str, - match: str) -> tuple[list[Any], list[Any]]: + def _match_features( + self, + nkeypoints: str, + ndescriptors: str, + 
hkeypoints: str, + hdescriptors: str, + match: str, + ) -> tuple[list[Any], list[Any]]: """ EXTRA DOCSTRING: Feature matching backend - matching stage (2). Match two sets of keypoints based on their descriptors. """ + def ratio_test(matches: list[Any]) -> list[Any]: """ The ratio test checks the first and second best match. If their @@ -1485,7 +1848,10 @@ def ratio_test(matches: list[Any]) -> list[Any]: smooth_dist1 = m[0].distance + 0.0000001 smooth_dist2 = m[1].distance + 0.0000001 - if smooth_dist1 / smooth_dist2 < self.params["fmatch"]["ratioThreshold"].value: + if ( + smooth_dist1 / smooth_dist2 + < self.params["fmatch"]["ratioThreshold"].value + ): matches2.append(m[0]) else: matches2.append(m[0]) @@ -1502,6 +1868,7 @@ def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]: match is not too large. """ import cv2 + matches2 = [] for nm in nmatches: for hm in hmatches: @@ -1519,17 +1886,23 @@ def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]: # build matcher and match feature vectors self.synchronize_backend(category="fmatch") else: - raise UnsupportedBackendError("Feature detector %s is not among the supported" - "ones %s" % (match, self.algorithms[self.categories["fmatch"]])) + raise UnsupportedBackendError( + "Feature detector %s is not among the supported" + "ones %s" % (match, self.algorithms[self.categories["fmatch"]]) + ) # find and filter matches through tests if match == "in-house-region": - matches = self.matcher.regionMatch(ndescriptors, hdescriptors, - nkeypoints, hkeypoints, - self.params["fmatch"]["refinements"].value, - self.params["fmatch"]["recalc_interval"].value, - self.params["fmatch"]["variants_k"].value, - self.params["fmatch"]["variants_ratio"].value) + matches = self.matcher.regionMatch( + ndescriptors, + hdescriptors, + nkeypoints, + hkeypoints, + self.params["fmatch"]["refinements"].value, + self.params["fmatch"]["recalc_interval"].value, + self.params["fmatch"]["variants_k"].value, + self.params["fmatch"]["variants_ratio"].value, + ) else: if self.params["fmatch"]["ratioTest"].value: matches = self.matcher.knnMatch(ndescriptors, hdescriptors, 2) @@ -1563,13 +1936,18 @@ def symmetry_test(nmatches: list[Any], hmatches: list[Any]) -> list[Any]: # update the current achieved similarity if matching similarity is used: # won't be updated anymore if self.params["feature"]["similarityRatio"].value == 0 self.imglog.similarities[-1] = match_similarity - log.log(9, "%s\\%s -> %f", len(match_nkeypoints), - len(nkeypoints), match_similarity) + log.log( + 9, "%s\\%s -> %f", len(match_nkeypoints), len(nkeypoints), match_similarity + ) return (match_nkeypoints, match_hkeypoints) - def _project_locations(self, locations_in_needle: list[tuple[int, int]], mnkp: list[Any], - mhkp: list[Any]) -> list[tuple[int, int]]: + def _project_locations( + self, + locations_in_needle: list[tuple[int, int]], + mnkp: list[Any], + mhkp: list[Any], + ) -> list[tuple[int, int]]: """ EXTRA DOCSTRING: Feature matching backend - projecting stage (3). 
@@ -1594,20 +1972,29 @@ def _project_locations(self, locations_in_needle: list[tuple[int, int]], mnkp: l import cv2 import numpy + # homography and fundamental matrix as options - homography is considered only # for rotation but currently gives better results than the fundamental matrix if self.params["feature"]["projectionMethod"].value == 0: - H, mask = cv2.findHomography(numpy.array([kp.pt for kp in mnkp]), - numpy.array([kp.pt for kp in mhkp]), cv2.RANSAC, - self.params["feature"]["ransacReprojThreshold"].value) + H, mask = cv2.findHomography( + numpy.array([kp.pt for kp in mnkp]), + numpy.array([kp.pt for kp in mhkp]), + cv2.RANSAC, + self.params["feature"]["ransacReprojThreshold"].value, + ) elif self.params["feature"]["projectionMethod"].value == 1: - H, mask = cv2.findFundamentalMat(numpy.array([kp.pt for kp in mnkp]), - numpy.array([kp.pt for kp in mhkp]), - method=cv2.RANSAC, param1=10.0, - param2=0.9) + H, mask = cv2.findFundamentalMat( + numpy.array([kp.pt for kp in mnkp]), + numpy.array([kp.pt for kp in mhkp]), + method=cv2.RANSAC, + param1=10.0, + param2=0.9, + ) else: - raise ValueError("Unsupported projection method - use 0 for homography and " - "1 for fundamentlal matrix") + raise ValueError( + "Unsupported projection method - use 0 for homography and " + "1 for fundamentlal matrix" + ) # measure total used features for the projected focus point if H is None or mask is None: @@ -1656,7 +2043,9 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) stages = ["detect", "match", "project", ""] for i, stage in enumerate(stages): @@ -1665,22 +2054,35 @@ def log(self, lvl: int) -> None: if self.imglog.logging_level > 20 and stage == "project": continue if stage == "": - name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, - self.imglog.similarities[-1]) + name = "imglog%s-3hotmap-%s.png" % ( + self.imglog.printable_step, + self.imglog.similarities[-1], + ) else: - name = "imglog%s-3hotmap-%s%s.png" % (self.imglog.printable_step, - i+1, stage) + name = "imglog%s-3hotmap-%s%s.png" % ( + self.imglog.printable_step, + i + 1, + stage, + ) self.imglog.dump_hotmap(name, self.imglog.hotmaps[i]) self.imglog.clear() ImageLogger.step += 1 - def _log_features(self, lvl: int, locations: list[tuple[float, float]], hotmap: "Matlike", - radius: int = 0, r: int = 255, g: int = 255, - b: int = 255) -> None: + def _log_features( + self, + lvl: int, + locations: list[tuple[float, float]], + hotmap: "Matlike", + radius: int = 0, + r: int = 255, + g: int = 255, + b: int = 255, + ) -> None: if lvl < self.imglog.logging_level: return import cv2 + for loc in locations: x, y = loc cv2.circle(hotmap, (int(x), int(y)), radius, (r, g, b)) @@ -1700,8 +2102,12 @@ class CascadeFinder(Finder): due to the cascade classifier API. 
""" - def __init__(self, classifier_datapath: str = ".", configure: bool = True, - synchronize: bool = True) -> None: + def __init__( + self, + classifier_datapath: str = ".", + configure: bool = True, + synchronize: bool = True, + ) -> None: """Build a CV backend using OpenCV's cascade matching options.""" super(CascadeFinder, self).__init__(configure=False, synchronize=False) @@ -1709,15 +2115,18 @@ def __init__(self, classifier_datapath: str = ".", configure: bool = True, if configure: self.__configure_backend(reset=True) - def __configure_backend(self, backend: str = None, category: str = "cascade", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "cascade", reset: bool = False + ) -> None: """ Custom implementation of the base method. See base method for details. """ if category != "cascade": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(CascadeFinder, self).configure_backend("cascade", reset=True) @@ -1730,8 +2139,9 @@ def __configure_backend(self, backend: str = None, category: str = "cascade", self.params[category]["minHeight"] = CVParameter(0, 0, None, 100.0) self.params[category]["maxHeight"] = CVParameter(1000, 0, None, 100.0) - def configure_backend(self, backend: str = None, category: str = "cascade", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "cascade", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -1755,25 +2165,35 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]": import cv2 import numpy + needle_cascade = cv2.CascadeClassifier(needle.data_file) if needle_cascade.empty(): raise Exception("Could not load the cascade classifier properly") - gray_haystack = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY) + gray_haystack = cv2.cvtColor( + numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY + ) canvas = numpy.array(haystack.pil_image) from .match import Match + matches = [] - rects = needle_cascade.detectMultiScale(gray_haystack, - self.params["cascade"]["scaleFactor"].value, - self.params["cascade"]["minNeighbors"].value, - 0, - (self.params["cascade"]["minWidth"].value, - self.params["cascade"]["minHeight"].value), - (self.params["cascade"]["maxWidth"].value, - self.params["cascade"]["maxHeight"].value)) - for (x, y, w, h) in rects: - cv2.rectangle(canvas, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(canvas, (x, y), (x+w, y+h), (255, 0, 0), 1) + rects = needle_cascade.detectMultiScale( + gray_haystack, + self.params["cascade"]["scaleFactor"].value, + self.params["cascade"]["minNeighbors"].value, + 0, + ( + self.params["cascade"]["minWidth"].value, + self.params["cascade"]["minHeight"].value, + ), + ( + self.params["cascade"]["maxWidth"].value, + self.params["cascade"]["maxHeight"].value, + ), + ) + for x, y, w, h in rects: + cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 0, 0), 1) dx, dy = needle.center_offset.x, needle.center_offset.y matches.append(Match(x, y, w, h, dx, dy)) @@ -1809,10 +2229,26 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: self.categories["threshold2"] = "threshold_filters2" self.categories["threshold3"] = "threshold_filters3" self.algorithms["text_matchers"] = ("mixed",) - self.algorithms["text_detectors"] = 
("pytesseract", "east", "erstat", "contours", "components") - self.algorithms["text_recognizers"] = ("pytesseract", "tesserocr", "tesseract", "hmm", "beamSearch") - self.algorithms["threshold_filters2"] = tuple(self.algorithms["threshold_filters"]) - self.algorithms["threshold_filters3"] = tuple(self.algorithms["threshold_filters"]) + self.algorithms["text_detectors"] = ( + "pytesseract", + "east", + "erstat", + "contours", + "components", + ) + self.algorithms["text_recognizers"] = ( + "pytesseract", + "tesserocr", + "tesseract", + "hmm", + "beamSearch", + ) + self.algorithms["threshold_filters2"] = tuple( + self.algorithms["threshold_filters"] + ) + self.algorithms["threshold_filters3"] = tuple( + self.algorithms["threshold_filters"] + ) # other attributes self.erc1 = None @@ -1827,15 +2263,26 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: self.__synchronize(reset=False) - def __configure_backend(self, backend: str = None, category: str = "text", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "text", reset: bool = False + ) -> None: """ Custom implementation of the base method. See base method for details. """ - if category not in ["text", "tdetect", "ocr", "contour", "threshold", "threshold2", "threshold3"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + if category not in [ + "text", + "tdetect", + "ocr", + "contour", + "threshold", + "threshold2", + "threshold3", + ]: + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) elif category in ["contour", "threshold"]: ContourFinder.configure_backend(self, backend, category, reset) return @@ -1859,8 +2306,10 @@ def __configure_backend(self, backend: str = None, category: str = "text", elif category == "ocr" and backend is None: backend = GlobalConfig.text_ocr_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) log.log(9, "Setting backend for %s to %s", category, backend) self.params[category] = {} @@ -1872,8 +2321,9 @@ def __configure_backend(self, backend: str = None, category: str = "text", if backend == "pytesseract": # eng, deu, etc. 
(ISO 639-3) self.params[category]["language"] = CVParameter("eng") - self.params[category]["char_whitelist"] = CVParameter(" 0123456789abcdefghijklmnopqrst" - "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + self.params[category]["char_whitelist"] = CVParameter( + " 0123456789abcdefghijklmnopqrst" "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ) # 0 original tesseract only, 1 neural nets LSTM only, 2 both, 3 anything available self.params[category]["oem"] = CVParameter(3, 0, 3, enumerated=True) # 13 different page segmentation modes - see Tesseract API @@ -1881,34 +2331,62 @@ def __configure_backend(self, backend: str = None, category: str = "text", self.params[category]["extra_configs"] = CVParameter("") self.params[category]["binarize_detection"] = CVParameter(False) self.params[category]["segment_line_max"] = CVParameter(1, 1, None, 1.0) - self.params[category]["recursion_height"] = CVParameter(0.3, 0.0, 1.0, 0.01) - self.params[category]["recursion_width"] = CVParameter(0.3, 0.0, 1.0, 0.01) + self.params[category]["recursion_height"] = CVParameter( + 0.3, 0.0, 1.0, 0.01 + ) + self.params[category]["recursion_width"] = CVParameter( + 0.3, 0.0, 1.0, 0.01 + ) elif backend == "east": # network input dimensions - must be divisible by 32, however currently only # 320x320 doesn't error out from the OpenCV implementation self.params[category]["input_res_x"] = CVParameter(320, 32, None, 32.0) self.params[category]["input_res_y"] = CVParameter(320, 32, None, 32.0) - self.params[category]["min_box_confidence"] = CVParameter(0.8, 0.0, 1.0, 0.1) + self.params[category]["min_box_confidence"] = CVParameter( + 0.8, 0.0, 1.0, 0.1 + ) elif backend == "erstat": self.params[category]["thresholdDelta"] = CVParameter(1, 1, 255, 50.0) - self.params[category]["minArea"] = CVParameter(0.00025, 0.0, 1.0, 0.25, 0.001) - self.params[category]["maxArea"] = CVParameter(0.13, 0.0, 1.0, 0.25, 0.001) - self.params[category]["minProbability"] = CVParameter(0.4, 0.0, 1.0, 0.25, 0.01) + self.params[category]["minArea"] = CVParameter( + 0.00025, 0.0, 1.0, 0.25, 0.001 + ) + self.params[category]["maxArea"] = CVParameter( + 0.13, 0.0, 1.0, 0.25, 0.001 + ) + self.params[category]["minProbability"] = CVParameter( + 0.4, 0.0, 1.0, 0.25, 0.01 + ) self.params[category]["nonMaxSuppression"] = CVParameter(True) - self.params[category]["minProbabilityDiff"] = CVParameter(0.1, 0.0, 1.0, 0.25, 0.01) - self.params[category]["minProbability2"] = CVParameter(0.3, 0.0, 1.0, 0.25, 0.01) + self.params[category]["minProbabilityDiff"] = CVParameter( + 0.1, 0.0, 1.0, 0.25, 0.01 + ) + self.params[category]["minProbability2"] = CVParameter( + 0.3, 0.0, 1.0, 0.25, 0.01 + ) elif backend == "contours": - self.params[category]["maxArea"] = CVParameter(10000, 0, None, 1000.0, 10.0) + self.params[category]["maxArea"] = CVParameter( + 10000, 0, None, 1000.0, 10.0 + ) self.params[category]["minWidth"] = CVParameter(1, 0, None, 100.0) self.params[category]["maxWidth"] = CVParameter(100, 0, None, 100.0) self.params[category]["minHeight"] = CVParameter(1, 0, None, 100.0) self.params[category]["maxHeight"] = CVParameter(100, 0, None, 100.0) - self.params[category]["minAspectRatio"] = CVParameter(0.1, 0.0, None, 10.0) - self.params[category]["maxAspectRatio"] = CVParameter(2.5, 0.0, None, 10.0) - self.params[category]["horizontalSpacing"] = CVParameter(10, 0, None, 10.0) - self.params[category]["verticalVariance"] = CVParameter(10, 0, None, 10.0) + self.params[category]["minAspectRatio"] = CVParameter( + 0.1, 0.0, None, 10.0 + ) + self.params[category]["maxAspectRatio"] = 
CVParameter( + 2.5, 0.0, None, 10.0 + ) + self.params[category]["horizontalSpacing"] = CVParameter( + 10, 0, None, 10.0 + ) + self.params[category]["verticalVariance"] = CVParameter( + 10, 0, None, 10.0 + ) # 0 horizontal, 1 vertical - self.params[category]["orientation"] = CVParameter(0, 0, 1, enumerated=True) + self.params[category]["orientation"] = CVParameter( + 0, 0, 1, enumerated=True + ) self.params[category]["minChars"] = CVParameter(3, 0, None, 2.0) elif backend == "components": # with equal delta and tolerance we ensure that only one failure will be @@ -1918,8 +2396,9 @@ def __configure_backend(self, backend: str = None, category: str = "text", if backend in ["tesseract", "tesserocr", "pytesseract"]: # eng, deu, etc. (ISO 639-3) self.params[category]["language"] = CVParameter("eng") - self.params[category]["char_whitelist"] = CVParameter(" 0123456789abcdefghijklmnopqrst" - "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + self.params[category]["char_whitelist"] = CVParameter( + " 0123456789abcdefghijklmnopqrst" "uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ) # 0 original tesseract only, 1 neural nets LSTM only, 2 both, 3 anything available self.params[category]["oem"] = CVParameter(3, 0, 3, enumerated=True) # 13 different page segmentation modes - see Tesseract API @@ -1927,20 +2406,30 @@ def __configure_backend(self, backend: str = None, category: str = "text", if backend == "pytesseract": self.params[category]["extra_configs"] = CVParameter("") # TODO: there could be a decent way to change component modes - self.params[category]["component_level"] = CVParameter(1, 1, 1, enumerated=True) + self.params[category]["component_level"] = CVParameter( + 1, 1, 1, enumerated=True + ) elif backend == "tesserocr": # TODO: there could be a decent way to change component modes - self.params[category]["component_level"] = CVParameter(1, 1, 1, enumerated=True) + self.params[category]["component_level"] = CVParameter( + 1, 1, 1, enumerated=True + ) else: # 0 OCR_LEVEL_WORD, 1 OCR_LEVEL_TEXT_LINE - self.params[category]["component_level"] = CVParameter(1, 0, 1, enumerated=True) + self.params[category]["component_level"] = CVParameter( + 1, 0, 1, enumerated=True + ) # perform custom image thresholding if set to true or leave it to the OCR self.params[category]["binarize_text"] = CVParameter(False) elif backend == "hmm": # 1 NM 2 CNN as classifiers for hidden markov models (see OpenCV documentation) - self.params[category]["classifier"] = CVParameter(1, 1, 2, enumerated=True) + self.params[category]["classifier"] = CVParameter( + 1, 1, 2, enumerated=True + ) # 0 OCR_LEVEL_WORD - self.params[category]["component_level"] = CVParameter(0, 0, 1, enumerated=True) + self.params[category]["component_level"] = CVParameter( + 0, 0, 1, enumerated=True + ) # perform custom image thresholding if set to true or leave it to the OCR self.params[category]["binarize_text"] = CVParameter(True) else: @@ -1952,20 +2441,31 @@ def __configure_backend(self, backend: str = None, category: str = "text", # border size to wrap around text field to improve recognition rate self.params[category]["border_size"] = CVParameter(10, 0, 100, 25.0) # 0 erode, 1 dilate, 2 both, 3 none - self.params[category]["erode_dilate"] = CVParameter(3, 0, 3, enumerated=True) + self.params[category]["erode_dilate"] = CVParameter( + 3, 0, 3, enumerated=True + ) # 0 MORPH_RECT, 1 MORPH_ELLIPSE, 2 MORPH_CROSS - self.params[category]["ed_kernel_type"] = CVParameter(0, 0, 2, enumerated=True) - self.params[category]["ed_kernel_width"] = CVParameter(1, 1, 1000, 250.0, 2.0) 
- self.params[category]["ed_kernel_height"] = CVParameter(1, 1, 1000, 250.0, 2.0) + self.params[category]["ed_kernel_type"] = CVParameter( + 0, 0, 2, enumerated=True + ) + self.params[category]["ed_kernel_width"] = CVParameter( + 1, 1, 1000, 250.0, 2.0 + ) + self.params[category]["ed_kernel_height"] = CVParameter( + 1, 1, 1000, 250.0, 2.0 + ) # perform distance transform if ture or not if false self.params[category]["distance_transform"] = CVParameter(False) # 1 CV_DIST_L1, 2 CV_DIST_L2, 3 CV_DIST_C - self.params[category]["dt_distance_type"] = CVParameter(1, 1, 3, enumerated=True) + self.params[category]["dt_distance_type"] = CVParameter( + 1, 1, 3, enumerated=True + ) # 0 (precise) or 3x3 or 5x5 (the latest only works with Euclidean distance CV_DIST_L2) self.params[category]["dt_mask_size"] = CVParameter(3, 0, 5, 8.0, 2.0) - def configure_backend(self, backend: str = None, category: str = "text", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "text", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -1973,9 +2473,15 @@ def configure_backend(self, backend: str = None, category: str = "text", """ self.__configure_backend(backend, category, reset) - def __configure(self, text_detector: str = None, text_recognizer: str = None, - threshold_filter: str = None, threshold_filter2: str = None, - threshold_filter3: str = None, reset: bool = True) -> None: + def __configure( + self, + text_detector: str = None, + text_recognizer: str = None, + threshold_filter: str = None, + threshold_filter2: str = None, + threshold_filter3: str = None, + reset: bool = True, + ) -> None: self.__configure_backend(category="text", reset=reset) self.__configure_backend(text_detector, "tdetect") self.__configure_backend(text_recognizer, "ocr") @@ -1984,10 +2490,16 @@ def __configure(self, text_detector: str = None, text_recognizer: str = None, self.__configure_backend(threshold_filter2, "threshold2") self.__configure_backend(threshold_filter3, "threshold3") - def configure(self, text_detector: str = None, text_recognizer: str = None, - threshold_filter: str = None, threshold_filter2: str = None, - threshold_filter3: str = None, reset: bool = True, - **kwargs: dict[str, type]) -> None: + def configure( + self, + text_detector: str = None, + text_recognizer: str = None, + threshold_filter: str = None, + threshold_filter2: str = None, + threshold_filter3: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: """ Custom implementation of the base method. 
@@ -1998,21 +2510,40 @@ def configure(self, text_detector: str = None, text_recognizer: str = None, :param threshold_filter3: additional threshold filter for distance transformation :param reset: whether to (re)set all parent configurations as well """ - self.__configure(text_detector, text_recognizer, - threshold_filter, threshold_filter2, threshold_filter3, - reset) - - def __synchronize_backend(self, backend: str = None, category: str = "text", - reset: bool = False) -> None: - if category not in ["text", "tdetect", "ocr", "contour", "threshold", "threshold2", "threshold3"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + self.__configure( + text_detector, + text_recognizer, + threshold_filter, + threshold_filter2, + threshold_filter3, + reset, + ) + + def __synchronize_backend( + self, backend: str = None, category: str = "text", reset: bool = False + ) -> None: + if category not in [ + "text", + "tdetect", + "ocr", + "contour", + "threshold", + "threshold2", + "threshold3", + ]: + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: Finder.synchronize_backend(self, "text", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) backend = self.params[category]["backend"] import cv2 + datapath = self.params["text"]["datapath"].value tessdata_path = os.path.join(datapath, "tessdata") if not os.path.exists(tessdata_path): @@ -2026,28 +2557,47 @@ def __synchronize_backend(self, backend: str = None, category: str = "text", elif category == "tdetect" and backend == "pytesseract": import pytesseract + self.tbox = pytesseract - tessdata_dir = "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else "" + tessdata_dir = ( + "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else "" + ) self.tbox_config = r"%s --oem %s --psm %s " - self.tbox_config %= (tessdata_dir, - self.params["tdetect"]["oem"].value, - self.params["tdetect"]["psmode"].value) - self.tbox_config += r"-c tessedit_char_whitelist='%s' %s batch.nochop wordstrbox" - self.tbox_config %= (self.params["tdetect"]["char_whitelist"].value, - self.params["tdetect"]["extra_configs"].value) + self.tbox_config %= ( + tessdata_dir, + self.params["tdetect"]["oem"].value, + self.params["tdetect"]["psmode"].value, + ) + self.tbox_config += ( + r"-c tessedit_char_whitelist='%s' %s batch.nochop wordstrbox" + ) + self.tbox_config %= ( + self.params["tdetect"]["char_whitelist"].value, + self.params["tdetect"]["extra_configs"].value, + ) elif category == "tdetect" and backend == "east": - self.east_net = cv2.dnn.readNet(os.path.join(datapath, 'frozen_east_text_detection.pb')) + self.east_net = cv2.dnn.readNet( + os.path.join(datapath, "frozen_east_text_detection.pb") + ) elif category == "tdetect" and backend == "erstat": - self.erc1 = cv2.text.loadClassifierNM1(os.path.join(datapath, 'trained_classifierNM1.xml')) - self.erf1 = cv2.text.createERFilterNM1(self.erc1, - self.params["tdetect"]["thresholdDelta"].value, - self.params["tdetect"]["minArea"].value, - self.params["tdetect"]["maxArea"].value, - self.params["tdetect"]["minProbability"].value, - self.params["tdetect"]["nonMaxSuppression"].value, - self.params["tdetect"]["minProbabilityDiff"].value) - self.erc2 = cv2.text.loadClassifierNM2(os.path.join(datapath, 'trained_classifierNM2.xml')) - 
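# --- Editor's sketch (not part of the patch): how the language/oem/psm/char_whitelist
# parameters end up as a tesseract command line when the pytesseract backend is used.
# The example values and the function name are illustrative, not patch defaults.
import pytesseract

def recognize_text(image, language="eng", oem=3, psmode=7,
                   whitelist="0123456789abcdefghijklmnopqrstuvwxyz"):
    config = "--oem %s --psm %s -c tessedit_char_whitelist='%s'" % (oem, psmode, whitelist)
    return pytesseract.image_to_string(image, lang=language, config=config)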
self.erf2 = cv2.text.createERFilterNM2(self.erc2, self.params["tdetect"]["minProbability2"].value) + self.erc1 = cv2.text.loadClassifierNM1( + os.path.join(datapath, "trained_classifierNM1.xml") + ) + self.erf1 = cv2.text.createERFilterNM1( + self.erc1, + self.params["tdetect"]["thresholdDelta"].value, + self.params["tdetect"]["minArea"].value, + self.params["tdetect"]["maxArea"].value, + self.params["tdetect"]["minProbability"].value, + self.params["tdetect"]["nonMaxSuppression"].value, + self.params["tdetect"]["minProbabilityDiff"].value, + ) + self.erc2 = cv2.text.loadClassifierNM2( + os.path.join(datapath, "trained_classifierNM2.xml") + ) + self.erf2 = cv2.text.createERFilterNM2( + self.erc2, self.params["tdetect"]["minProbability2"].value + ) elif category == "tdetect": # nothing to sync return @@ -2055,30 +2605,45 @@ def __synchronize_backend(self, backend: str = None, category: str = "text", elif category == "ocr": if backend == "pytesseract": import pytesseract + self.ocr = pytesseract - tessdata_dir = "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else "" + tessdata_dir = ( + "--tessdata-dir '" + tessdata_path + "'" if tessdata_path else "" + ) self.ocr_config = r"%s --oem %s --psm %s " - self.ocr_config %= (tessdata_dir, - self.params["ocr"]["oem"].value, - self.params["ocr"]["psmode"].value) + self.ocr_config %= ( + tessdata_dir, + self.params["ocr"]["oem"].value, + self.params["ocr"]["psmode"].value, + ) self.ocr_config += r"-c tessedit_char_whitelist='%s' %s" - self.ocr_config %= (self.params["ocr"]["char_whitelist"].value, - self.params["ocr"]["extra_configs"].value) + self.ocr_config %= ( + self.params["ocr"]["char_whitelist"].value, + self.params["ocr"]["extra_configs"].value, + ) elif backend == "tesserocr": from tesserocr import PyTessBaseAPI - kwargs = {"lang": self.params["ocr"]["language"].value, - "oem": self.params["ocr"]["oem"].value, - "psm": self.params["ocr"]["psmode"].value} + + kwargs = { + "lang": self.params["ocr"]["language"].value, + "oem": self.params["ocr"]["oem"].value, + "psm": self.params["ocr"]["psmode"].value, + } if tessdata_path: self.ocr = PyTessBaseAPI(path=tessdata_path, **kwargs) else: self.ocr = PyTessBaseAPI(**kwargs) - self.ocr.SetVariable("tessedit_char_whitelist", self.params["ocr"]["char_whitelist"].value) + self.ocr.SetVariable( + "tessedit_char_whitelist", + self.params["ocr"]["char_whitelist"].value, + ) elif backend == "tesseract": - kwargs = {"language": self.params["ocr"]["language"].value, - "char_whitelist": self.params["ocr"]["char_whitelist"].value, - "oem": self.params["ocr"]["oem"].value, - "psmode": self.params["ocr"]["psmode"].value} + kwargs = { + "language": self.params["ocr"]["language"].value, + "char_whitelist": self.params["ocr"]["char_whitelist"].value, + "oem": self.params["ocr"]["oem"].value, + "psmode": self.params["ocr"]["psmode"].value, + } if tessdata_path: self.ocr = cv2.text.OCRTesseract_create(datapath, **kwargs) else: @@ -2086,34 +2651,55 @@ def __synchronize_backend(self, backend: str = None, category: str = "text", elif backend in ["hmm", "beamSearch"]: import numpy + # vocabulary is strictly related with the XML data so remains hardcoded here - vocabulary = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - with open(os.path.join(datapath, 'OCRHMM_transitions_table.xml')) as f: + vocabulary = ( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + ) + with open(os.path.join(datapath, "OCRHMM_transitions_table.xml")) as f: transition_p_xml = f.read() - 
transition_p_data = re.search("(.*)", - transition_p_xml.replace("\n", " ")) - assert transition_p_data is not None, "Corrupted transition probability data" - transition_p = numpy.fromstring(transition_p_data.group(1).strip(), sep=' ').reshape(62, 62) + transition_p_data = re.search( + "(.*)", transition_p_xml.replace("\n", " ") + ) + assert ( + transition_p_data is not None + ), "Corrupted transition probability data" + transition_p = numpy.fromstring( + transition_p_data.group(1).strip(), sep=" " + ).reshape(62, 62) emission_p = numpy.eye(62, dtype=numpy.float64) if backend == "hmm": - classifier_data = os.path.join(datapath, 'OCRHMM_knn_model_data.xml.gz') + classifier_data = os.path.join( + datapath, "OCRHMM_knn_model_data.xml.gz" + ) if self.params["ocr"]["classifier"].value == 1: classifier = cv2.text.loadOCRHMMClassifierNM(classifier_data) elif self.params["ocr"]["classifier"].value == 2: classifier = cv2.text.loadOCRHMMClassifierCNN(classifier_data) else: - raise ValueError("Invalid classifier selected for OCR - must be NM or CNN") - self.ocr = cv2.text.OCRHMMDecoder_create(classifier, vocabulary, transition_p, emission_p) + raise ValueError( + "Invalid classifier selected for OCR - must be NM or CNN" + ) + self.ocr = cv2.text.OCRHMMDecoder_create( + classifier, vocabulary, transition_p, emission_p + ) else: - classifier_data = os.path.join(datapath, 'OCRBeamSearch_CNN_model_data.xml.gz') - classifier = cv2.text.loadOCRBeamSearchClassifierCNN(classifier_data) - self.ocr = cv2.text.OCRBeamSearchDecoder_create(classifier, vocabulary, transition_p, emission_p) + classifier_data = os.path.join( + datapath, "OCRBeamSearch_CNN_model_data.xml.gz" + ) + classifier = cv2.text.loadOCRBeamSearchClassifierCNN( + classifier_data + ) + self.ocr = cv2.text.OCRBeamSearchDecoder_create( + classifier, vocabulary, transition_p, emission_p + ) else: raise ValueError("Invalid OCR backend '%s'" % backend) - def synchronize_backend(self, backend: str = None, category: str = "text", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "text", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
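# --- Editor's sketch (not part of the patch): constructing the OpenCV HMM OCR decoder
# wired up above, assuming opencv-contrib's "text" module and its sample data file
# OCRHMM_knn_model_data.xml.gz are available in the working directory. The transition and
# emission matrices are identity placeholders here; the real code loads trained transition
# probabilities from OCRHMM_transitions_table.xml as shown in the hunk above.
import cv2
import numpy

vocabulary = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
transition_p = numpy.eye(len(vocabulary), dtype=numpy.float64)
emission_p = numpy.eye(len(vocabulary), dtype=numpy.float64)
classifier = cv2.text.loadOCRHMMClassifierNM("OCRHMM_knn_model_data.xml.gz")
decoder = cv2.text.OCRHMMDecoder_create(classifier, vocabulary, transition_p, emission_p)
# decoder.run(gray_image, binary_mask, min_confidence, component_level) then yields text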
@@ -2121,9 +2707,15 @@ def synchronize_backend(self, backend: str = None, category: str = "text", """ self.__synchronize_backend(backend, category, reset) - def __synchronize(self, text_detector: str = None, text_recognizer: str = None, - threshold_filter: str = None, threshold_filter2: str = None, - threshold_filter3: str = None, reset: bool = True) -> None: + def __synchronize( + self, + text_detector: str = None, + text_recognizer: str = None, + threshold_filter: str = None, + threshold_filter2: str = None, + threshold_filter3: str = None, + reset: bool = True, + ) -> None: self.__synchronize_backend(category="text", reset=reset) self.__synchronize_backend(text_detector, "tdetect") self.__synchronize_backend(text_recognizer, "ocr") @@ -2132,9 +2724,15 @@ def __synchronize(self, text_detector: str = None, text_recognizer: str = None, self.__synchronize_backend(threshold_filter2, "threshold2") self.__synchronize_backend(threshold_filter3, "threshold3") - def synchronize(self, text_detector: str = None, text_recognizer: str = None, - threshold_filter: str = None, threshold_filter2: str = None, - threshold_filter3: str = None, reset: bool = True) -> None: + def synchronize( + self, + text_detector: str = None, + text_recognizer: str = None, + threshold_filter: str = None, + threshold_filter2: str = None, + threshold_filter3: str = None, + reset: bool = True, + ) -> None: """ Custom implementation of the base method. @@ -2145,9 +2743,14 @@ def synchronize(self, text_detector: str = None, text_recognizer: str = None, :param threshold_filter3: additional threshold filter for distance transformation :param reset: whether to (re)set all parent configurations as well """ - self.__synchronize(text_detector, text_recognizer, - threshold_filter, threshold_filter2, threshold_filter3, - reset) + self.__synchronize( + text_detector, + text_recognizer, + threshold_filter, + threshold_filter2, + threshold_filter3, + reset, + ) def find(self, needle: "Text", haystack: "Image") -> "list[Match]": """ @@ -2165,6 +2768,7 @@ def find(self, needle: "Text", haystack: "Image") -> "list[Match]": import cv2 import numpy + text_needle = needle.value img_haystack = numpy.array(haystack.pil_image) final_hotmap = numpy.array(haystack.pil_image) @@ -2183,13 +2787,17 @@ def find(self, needle: "Text", haystack: "Image") -> "list[Match]": elif backend == "components": text_regions = self._detect_text_components(haystack) else: - raise UnsupportedBackendError("Unsupported text detection backend %s" % backend) + raise UnsupportedBackendError( + "Unsupported text detection backend %s" % backend + ) # perform optical character recognition on the final regions backend = self.params["ocr"]["backend"] log.debug("Recognizing text with %s", backend) from .match import Match + matches = [] + def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike": if self.params["ocr"]["binarize_text"].value: first_threshold = self.params["threshold"] @@ -2201,26 +2809,41 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike": return text_img else: return cv2.cvtColor(text_img, cv2.COLOR_RGB2GRAY) + for i, text_box in enumerate(text_regions): # main OCR preprocessing stage border = self.params["ocr"]["border_size"].value - text_img = img_haystack[max(text_box[1]-border, 0):min(text_box[1]+text_box[3]+border, img_haystack.shape[0]), - max(text_box[0]-border, 0):min(text_box[0]+text_box[2]+border, img_haystack.shape[1])] + text_img = img_haystack[ + max(text_box[1] - border, 0) : min( + text_box[1] + text_box[3] + border, 
img_haystack.shape[0] + ), + max(text_box[0] - border, 0) : min( + text_box[0] + text_box[2] + border, img_haystack.shape[1] + ), + ] factor = self.params["ocr"]["zoom_factor"].value log.debug("Zooming x%i candidate for improved OCR processing", factor) text_img = cv2.resize(text_img, None, fx=factor, fy=factor) text_img = binarize_step("threshold2", text_img) if self.params["ocr"]["distance_transform"].value: - text_img = cv2.distanceTransform(text_img, - self.params["ocr"]["dt_distance_type"].value, - self.params["ocr"]["dt_mask_size"].value) - text_img = cv2.cvtColor(numpy.asarray(text_img, dtype='uint8'), cv2.COLOR_GRAY2RGB) + text_img = cv2.distanceTransform( + text_img, + self.params["ocr"]["dt_distance_type"].value, + self.params["ocr"]["dt_mask_size"].value, + ) + text_img = cv2.cvtColor( + numpy.asarray(text_img, dtype="uint8"), cv2.COLOR_GRAY2RGB + ) text_img = binarize_step("threshold3", text_img) if self.params["ocr"]["erode_dilate"].value < 3: - element = cv2.getStructuringElement(self.params["ocr"]["ed_kernel_type"].value, - (self.params["ocr"]["ed_kernel_width"].value, - self.params["ocr"]["ed_kernel_height"].value)) + element = cv2.getStructuringElement( + self.params["ocr"]["ed_kernel_type"].value, + ( + self.params["ocr"]["ed_kernel_width"].value, + self.params["ocr"]["ed_kernel_height"].value, + ), + ) if self.params["ocr"]["erode_dilate"].value in [0, 2]: text_img = cv2.erode(text_img, element) if self.params["ocr"]["erode_dilate"].value in [1, 2]: @@ -2229,35 +2852,44 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike": # BUG: we hit segfault when using the BeamSearch OCR backend so disallow it if backend == "beamSearch": - raise NotImplementedError("Current version of BeamSearch segfaults so it's not yet available") + raise NotImplementedError( + "Current version of BeamSearch segfaults so it's not yet available" + ) # TODO: we can do this now with pytesseract/tesserocr but have to evaluate its usefulness - #vector boxes; - #vector words; - #vector confidences; - #output = ocr.run(group_img, &boxes, &words, &confidences, cv2.text.OCR_LEVEL_WORD) + # vector boxes; + # vector words; + # vector confidences; + # output = ocr.run(group_img, &boxes, &words, &confidences, cv2.text.OCR_LEVEL_WORD) # redirection of tesseract's streams can only be done on the file descriptor level # sys.stdout = open(os.devnull, 'w') if backend == "pytesseract": - output = self.ocr.image_to_string(text_img, - lang=self.params["ocr"]["language"].value, - config=self.ocr_config) - logging.debug("Running pytesseract with extra command line %s", self.ocr_config) + output = self.ocr.image_to_string( + text_img, + lang=self.params["ocr"]["language"].value, + config=self.ocr_config, + ) + logging.debug( + "Running pytesseract with extra command line %s", self.ocr_config + ) elif backend == "tesserocr": self.ocr.SetImage(PIL.Image.fromarray(text_img)) output = self.ocr.GetUTF8Text() else: stdout_fd = sys.stdout.fileno() if hasattr(sys.stdout, "fileno") else 1 stderr_fd = sys.stderr.fileno() if hasattr(sys.stderr, "fileno") else 2 - null_fo = open(os.devnull, 'wb') - with os.fdopen(os.dup(stdout_fd), 'wb') as cpout_fo: - with os.fdopen(os.dup(stderr_fd), 'wb') as cperr_fo: + null_fo = open(os.devnull, "wb") + with os.fdopen(os.dup(stdout_fd), "wb") as cpout_fo: + with os.fdopen(os.dup(stderr_fd), "wb") as cperr_fo: sys.stdout.flush() sys.stderr.flush() os.dup2(null_fo.fileno(), stdout_fd) os.dup2(null_fo.fileno(), stderr_fd) - output = self.ocr.run(text_img, text_img, - 
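# --- Editor's sketch (not part of the patch): the file-descriptor level silencing used
# around the noisy OCR call above, packaged as a reusable context manager. Redirection
# has to happen on the descriptors because the chatter comes from C++ code, not from
# Python's sys.stdout/sys.stderr objects.
import os
import sys
from contextlib import contextmanager

@contextmanager
def silence_fds():
    stdout_fd = sys.stdout.fileno() if hasattr(sys.stdout, "fileno") else 1
    stderr_fd = sys.stderr.fileno() if hasattr(sys.stderr, "fileno") else 2
    saved_out, saved_err = os.dup(stdout_fd), os.dup(stderr_fd)
    with open(os.devnull, "wb") as null_fo:
        sys.stdout.flush()
        sys.stderr.flush()
        os.dup2(null_fo.fileno(), stdout_fd)
        os.dup2(null_fo.fileno(), stderr_fd)
        try:
            yield
        finally:
            sys.stdout.flush()
            sys.stderr.flush()
            os.dup2(saved_out, stdout_fd)
            os.dup2(saved_err, stderr_fd)
            os.close(saved_out)
            os.close(saved_err)

# usage: with silence_fds(): output = decoder.run(...)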
self.params["ocr"]["min_confidence"].value, - self.params["ocr"]["component_level"].value) + output = self.ocr.run( + text_img, + text_img, + self.params["ocr"]["min_confidence"].value, + self.params["ocr"]["component_level"].value, + ) sys.stdout.flush() sys.stderr.flush() os.dup2(cpout_fo.fileno(), stdout_fd) @@ -2266,9 +2898,11 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike": if self.params["ocr"]["component_level"].value == 1: # strip of the new line character which is never useful output = output.rstrip() - log.debug("OCR output %s = '%s'", i+1, output) + log.debug("OCR output %s = '%s'", i + 1, output) - similarity = 1.0 - float(needle.distance_to(output)) / max(len(output), len(text_needle)) + similarity = 1.0 - float(needle.distance_to(output)) / max( + len(output), len(text_needle) + ) log.debug("Similarity = '%s'", similarity) self.imglog.similarities.append(similarity) if similarity >= self.params["find"]["similarity"].value: @@ -2276,8 +2910,8 @@ def binarize_step(threshold: str, text_img: "Matlike") -> "Matlike": self.imglog.locations.append((text_box[0], text_box[1])) x, y, w, h = text_box dx, dy = needle.center_offset.x, needle.center_offset.y - cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(final_hotmap, (x, y), (x+w, y+h), (255, 255, 255), 1) + cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(final_hotmap, (x, y), (x + w, y + h), (255, 255, 255), 1) matches.append(Match(x, y, w, h, dx, dy, similarity)) matches = sorted(matches, key=lambda x: x.similarity, reverse=True) @@ -2297,16 +2931,24 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]: max_segment = self.params["tdetect"]["segment_line_max"].value for i in range(1, max_segment): hline = cv2.getStructuringElement(cv2.MORPH_RECT, (max_segment, i)) - hlopened = cv2.morphologyEx(detection_img, cv2.MORPH_OPEN, hline, iterations=1) + hlopened = cv2.morphologyEx( + detection_img, cv2.MORPH_OPEN, hline, iterations=1 + ) vline = cv2.getStructuringElement(cv2.MORPH_RECT, (i, max_segment)) - vlopened = cv2.morphologyEx(detection_img, cv2.MORPH_OPEN, vline, iterations=1) + vlopened = cv2.morphologyEx( + detection_img, cv2.MORPH_OPEN, vline, iterations=1 + ) detection_img -= hlopened detection_img -= vlopened else: detection_img = cv2.cvtColor(detection_img, cv2.COLOR_RGB2GRAY) - detection_width = int(self.params["tdetect"]["recursion_width"].value * haystack.width) - detection_height = int(self.params["tdetect"]["recursion_height"].value * haystack.height) + detection_width = int( + self.params["tdetect"]["recursion_width"].value * haystack.width + ) + detection_height = int( + self.params["tdetect"]["recursion_height"].value * haystack.height + ) char_canvas = detection_img text_canvas = numpy.array(haystack.pil_image) @@ -2320,12 +2962,15 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]: region_w, region_h = next_region.shape[1], next_region.shape[0] # TODO: activate flag for word-only matching if there is enough interest for this - #output = self.tbox.image_to_boxes(next_region, self.params["tdetect"]["language"].value, + # output = self.tbox.image_to_boxes(next_region, self.params["tdetect"]["language"].value, # config=self.tbox_config, output_type=self.tbox.Output.DICT) # ...process dict - output = self.tbox.run_and_get_output(next_region, 'box', - self.params["tdetect"]["language"].value, - config=self.tbox_config) + output = self.tbox.run_and_get_output( + next_region, + "box", + 
self.params["tdetect"]["language"].value, + config=self.tbox_config, + ) for line in output.splitlines(): tokens = line.rstrip().split(" ", maxsplit=6) if tokens[0] != "WordStr": @@ -2342,16 +2987,24 @@ def _detect_text_boxes(self, haystack: "Image") -> list[list[int]]: logging.debug("Empty text found, skipping region") continue if (w > detection_width and h > 0) or (h > detection_height and w > 0): - subregion_npy = next_region[max(dy, 0):min(dy+h, region_h), - max(dx, 0):min(dx+w, region_w)] + subregion_npy = next_region[ + max(dy, 0) : min(dy + h, region_h), + max(dx, 0) : min(dx + w, region_w), + ] if next_region.shape != subregion_npy.shape: - logging.debug("Large region of size %sx%s detected, rescanning inside of it", w, h) + logging.debug( + "Large region of size %sx%s detected, rescanning inside of it", + w, + h, + ) recursive_regions.append((x, y, subregion_npy)) continue - logging.debug("Found text '%s' with tesseract-provided box %s", text, (x, y, w, h)) - cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 255, 0), 1) + logging.debug( + "Found text '%s' with tesseract-provided box %s", text, (x, y, w, h) + ) + cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 255, 0), 1) text_regions.append([x, y, w, h]) return text_regions @@ -2361,6 +3014,7 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int] #: https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/ import cv2 import numpy + img = numpy.array(haystack.pil_image) char_canvas = cv2.cvtColor(numpy.array(haystack.pil_image), cv2.COLOR_RGB2GRAY) text_canvas = numpy.array(haystack.pil_image) @@ -2368,21 +3022,27 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int] self.imglog.hotmaps.append(text_canvas) # resize the image to resolution compatible with the model - inp_width, inp_height = (self.params["tdetect"]["input_res_x"].value, - self.params["tdetect"]["input_res_y"].value) + inp_width, inp_height = ( + self.params["tdetect"]["input_res_x"].value, + self.params["tdetect"]["input_res_y"].value, + ) width_ratio = img.shape[1] / float(inp_width) height_ratio = img.shape[0] / float(inp_height) img = cv2.resize(img, (inp_width, inp_height)) # convert to a model-compatible input using the mean from the training - inp = cv2.dnn.blobFromImage(img, mean=(123.68, 116.78, 103.94), swapRB=True, crop=False) + inp = cv2.dnn.blobFromImage( + img, mean=(123.68, 116.78, 103.94), swapRB=True, crop=False + ) self.east_net.setInput(inp) # select two output layers for the EAST detector model respectivelly for # the output probabilities and the text bounding box coordinates output_layers = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"] probability, geometry = self.east_net.forward(output_layers) - char_canvas[:] = cv2.resize(probability[0, 0]*255.0, (char_canvas.shape[1], char_canvas.shape[0])) + char_canvas[:] = cv2.resize( + probability[0, 0] * 255.0, (char_canvas.shape[1], char_canvas.shape[0]) + ) rects = [] for row in range(0, probability.shape[2]): @@ -2400,14 +3060,34 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int] # calculate the rotation angle from the prediction output sin, cos = numpy.sin(row_data[4][col]), numpy.cos(row_data[4][col]) # compute the starting (from ending) coordinates for the text bounding box - x2 = min(dx + cos * row_data[1][col] + sin * 
row_data[2][col], inp_width) * width_ratio - y2 = min(dy - sin * row_data[1][col] + cos * row_data[2][col], inp_height) * height_ratio + x2 = ( + min(dx + cos * row_data[1][col] + sin * row_data[2][col], inp_width) + * width_ratio + ) + y2 = ( + min( + dy - sin * row_data[1][col] + cos * row_data[2][col], inp_height + ) + * height_ratio + ) # the network might give unlimited region boundaries so limit by input width/height (above) x1, y1 = x2 - w, y2 - h rect = (int(x1), int(y1), int(w), int(h)) - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2) - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (255, 255, 255), 1) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 0), + 2, + ) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (255, 255, 255), + 1, + ) rects.append(rect) # TODO: needed for outsourced nonmaxima supression # confidences.append(row_scores[x]) @@ -2432,16 +3112,37 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int] for r2pair in region_queue: r2: tuple[int, int, int, int] = r2pair[0] # if the two regions intersect - if (r1[0] < r2[0] + r2[2] and r1[0] + r1[2] > r2[0] - and r1[1] < r2[1] + r2[3] and r1[1] + r1[3] > r2[1]): - r1 = [min(r1[0], r2[0]), min(r1[1], r2[1]), max(r1[2], r2[2]), max(r1[3], r2[3])] + if ( + r1[0] < r2[0] + r2[2] + and r1[0] + r1[2] > r2[0] + and r1[1] < r2[1] + r2[3] + and r1[1] + r1[3] > r2[1] + ): + r1 = [ + min(r1[0], r2[0]), + min(r1[1], r2[1]), + max(r1[2], r2[2]), + max(r1[3], r2[3]), + ] # second region will no longer be considered r2pair[1] = False # first region is now merged with all intersecting regions text_regions.append(r1) for rect in text_regions: - cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2) - cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1) + cv2.rectangle( + text_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 0), + 2, + ) + cv2.rectangle( + text_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 255), + 1, + ) logging.debug("A total of %s final text regions found", len(text_regions)) return text_regions @@ -2449,6 +3150,7 @@ def _detect_text_east(self, haystack: "Image") -> list[tuple[int, int, int, int] def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, int]]: import cv2 import numpy + img = numpy.array(haystack.pil_image) char_canvas = numpy.array(haystack.pil_image) text_canvas = numpy.array(haystack.pil_image) @@ -2458,32 +3160,68 @@ def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, in # extract channels to be processed individually - B, G, R, lightness, and gradient magnitude channels = list(cv2.text.computeNMChannels(img)) # append negative channels to detect ER- (bright regions over dark background) skipping the gradient channel - channel_num_without_grad = len(channels)-1 + channel_num_without_grad = len(channels) - 1 for i in range(0, channel_num_without_grad): - channels.append(255-channels[i]) + channels.append(255 - channels[i]) char_regions = [] text_regions = [] # apply the default cascade classifier to each independent channel - log.debug("Extracting class specific extremal regions from %s channels", len(channels)) + log.debug( + "Extracting class specific extremal regions from %s channels", 
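# --- Editor's sketch (not part of the patch): the same intersection-merge idea as the
# region queue loop above, written as a standalone helper. Unlike the finder's loop,
# which keeps the maximum width/height, this sketch grows each box to the exact bounding
# union of everything it intersects.
def merge_intersecting(rects):
    rects = [list(r) for r in rects]
    merged = True
    while merged:
        merged = False
        out = []
        while rects:
            x, y, w, h = rects.pop(0)
            i = 0
            while i < len(rects):
                x2, y2, w2, h2 = rects[i]
                if x < x2 + w2 and x + w > x2 and y < y2 + h2 and y + h > y2:
                    # grow the first box to cover the second and drop the second
                    nx, ny = min(x, x2), min(y, y2)
                    w, h = max(x + w, x2 + w2) - nx, max(y + h, y2 + h2) - ny
                    x, y = nx, ny
                    rects.pop(i)
                    merged = True
                else:
                    i += 1
            out.append([x, y, w, h])
        rects = out
    return [tuple(r) for r in rects]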
len(channels) + ) for i, channel in enumerate(channels): # one liner for "erf1.run(channel)" then "erf2.run(channel)" regions = cv2.text.detectRegions(channel, self.erf1, self.erf2) - logging.debug("A total of %s possible character regions found on channel %s", len(regions), i) + logging.debug( + "A total of %s possible character regions found on channel %s", + len(regions), + i, + ) rects = [cv2.boundingRect(p.reshape(-1, 1, 2)) for p in regions] for rect in rects: - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2) - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 0), + 2, + ) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 255), + 1, + ) if len(regions) == 0: continue - region_groups = cv2.text.erGrouping(img, channel, [r.tolist() for r in regions]) - logging.debug("A total of %s possible text regions found on channel %s", len(region_groups), i) + region_groups = cv2.text.erGrouping( + img, channel, [r.tolist() for r in regions] + ) + logging.debug( + "A total of %s possible text regions found on channel %s", + len(region_groups), + i, + ) for rect in region_groups: - cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2) - cv2.rectangle(text_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 255, 0), 1) + cv2.rectangle( + text_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 0), + 2, + ) + cv2.rectangle( + text_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 255, 0), + 1, + ) char_regions.extend(regions) text_regions.extend(region_groups) @@ -2501,18 +3239,30 @@ def _detect_text_erstat(self, haystack: "Image") -> list[tuple[int, int, int, in for r2pair in region_queue: r2, _ = r2pair # if the two regions intersect - if (r1[0] < r2[0] + r2[2] and r1[0] + r1[2] > r2[0] - and r1[1] < r2[1] + r2[3] and r1[1] + r1[3] > r2[1]): - r1 = [min(r1[0], r2[0]), min(r1[1], r2[1]), max(r1[2], r2[2]), max(r1[3], r2[3])] + if ( + r1[0] < r2[0] + r2[2] + and r1[0] + r1[2] > r2[0] + and r1[1] < r2[1] + r2[3] + and r1[1] + r1[3] > r2[1] + ): + r1 = [ + min(r1[0], r2[0]), + min(r1[1], r2[1]), + max(r1[2], r2[2]), + max(r1[3], r2[3]), + ] # second region will no longer be considered r2pair[1] = False # first region is now merged with all intersecting regions final_regions.append(r1) return final_regions - def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int, int]]: + def _detect_text_contours( + self, haystack: "Image" + ) -> list[tuple[int, int, int, int]]: import cv2 import numpy + img = numpy.array(haystack.pil_image) char_canvas = numpy.array(haystack.pil_image) text_canvas = numpy.array(haystack.pil_image) @@ -2526,27 +3276,39 @@ def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int, char_regions = [] for hcontour in haystack_contours: x, y, w, h = cv2.boundingRect(hcontour) - area, ratio = cv2.contourArea(hcontour), float(w)/h - if (area < self.params["contour"]["minArea"].value + area, ratio = cv2.contourArea(hcontour), float(w) / h + if ( + area < self.params["contour"]["minArea"].value or area > self.params["tdetect"]["maxArea"].value or w < self.params["tdetect"]["minWidth"].value or w > self.params["tdetect"]["maxWidth"].value or h < 
self.params["tdetect"]["minHeight"].value or h > self.params["tdetect"]["maxHeight"].value or ratio < self.params["tdetect"]["minAspectRatio"].value - or ratio > self.params["tdetect"]["maxAspectRatio"].value): - log.debug("Ignoring contour with area %sx%s>%s and aspect ratio %s/%s=%s", - w, h, area, w, h, ratio) + or ratio > self.params["tdetect"]["maxAspectRatio"].value + ): + log.debug( + "Ignoring contour with area %sx%s>%s and aspect ratio %s/%s=%s", + w, + h, + area, + w, + h, + ratio, + ) continue else: - cv2.rectangle(char_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(char_canvas, (x, y), (x+w, y+h), (0, 0, 255), 1) + cv2.rectangle(char_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(char_canvas, (x, y), (x + w, y + h), (0, 0, 255), 1) char_regions.append((x, y, w, h)) char_regions = sorted(char_regions, key=lambda x: x[0]) # group characters into horizontally-correlated regions text_regions = [] - dx, dy = self.params["tdetect"]["horizontalSpacing"].value, self.params["tdetect"]["verticalVariance"].value + dx, dy = ( + self.params["tdetect"]["horizontalSpacing"].value, + self.params["tdetect"]["verticalVariance"].value, + ) text_orientation = self.params["tdetect"]["orientation"].value min_chars_for_text = self.params["tdetect"]["minChars"].value for i, region1 in enumerate(char_regions): @@ -2561,28 +3323,49 @@ def _detect_text_contours(self, haystack: "Image") -> list[tuple[int, int, int, x1, y1, w1, h1 = region1 x2, y2, w2, h2 = region2 if text_orientation == 0: - is_text = x2 - (x1 + w1) < dx and x1 - (x2 + w2) < dx and abs(y1 - y2) < dy and abs(h1 - h2) < 2*dy + is_text = ( + x2 - (x1 + w1) < dx + and x1 - (x2 + w2) < dx + and abs(y1 - y2) < dy + and abs(h1 - h2) < 2 * dy + ) elif text_orientation == 1: - is_text = y2 - (y1 + h1) < dy and y1 - (y2 + h2) < dy and abs(x1 - x2) < dx and abs(w1 - w2) < 2*dx + is_text = ( + y2 - (y1 + h1) < dy + and y1 - (y2 + h2) < dy + and abs(x1 - x2) < dx + and abs(w1 - w2) < 2 * dx + ) if is_text: - region1 = (min(x1, x2), min(y1, y2), max(x1+w1, x2+w2)-min(x1, x2), max(y1+h1, y2+h2)-min(y1, y2)) + region1 = ( + min(x1, x2), + min(y1, y2), + max(x1 + w1, x2 + w2) - min(x1, x2), + max(y1 + h1, y2 + h2) - min(y1, y2), + ) chars_for_text += 1 char_regions[j] = None if chars_for_text < min_chars_for_text: - log.debug("Ignoring text contour with %s<%s characters", - chars_for_text, min_chars_for_text) + log.debug( + "Ignoring text contour with %s<%s characters", + chars_for_text, + min_chars_for_text, + ) continue x, y, w, h = region1 - cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 0, 0), 2) - cv2.rectangle(text_canvas, (x, y), (x+w, y+h), (0, 255, 0), 1) + cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 0, 0), 2) + cv2.rectangle(text_canvas, (x, y), (x + w, y + h), (0, 255, 0), 1) text_regions.append(region1) char_regions[i] = None return text_regions - def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int, int]]: + def _detect_text_components( + self, haystack: "Image" + ) -> list[tuple[int, int, int, int]]: import cv2 import numpy + img = numpy.array(haystack.pil_image) char_canvas = numpy.array(haystack.pil_image) text_canvas = numpy.array(haystack.pil_image) @@ -2590,9 +3373,14 @@ def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int self.imglog.hotmaps.append(text_canvas) connectivity = self.params["tdetect"]["connectivity"].value - label_num, label_img, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity, cv2.CV_32S) - 
logging.debug("Detected %s component labels with centroids: %s", label_num, - ", ".join([str((int(c[0]), int(c[1]))) for c in centroids])) + label_num, label_img, stats, centroids = cv2.connectedComponentsWithStats( + img, connectivity, cv2.CV_32S + ) + logging.debug( + "Detected %s component labels with centroids: %s", + label_num, + ", ".join([str((int(c[0]), int(c[1]))) for c in centroids]), + ) self.imglog.hotmaps.append(label_img * 255) for i in range(label_num): x, y = stats[i, cv2.CC_STAT_LEFT], stats[i, cv2.CC_STAT_TOP] @@ -2602,13 +3390,29 @@ def _detect_text_components(self, haystack: "Image") -> list[tuple[int, int, int continue else: rect = [x, y, w, h] - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 0), 2) - cv2.rectangle(char_canvas, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0, 0, 255), 1) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 0), + 2, + ) + cv2.rectangle( + char_canvas, + (rect[0], rect[1]), + (rect[0] + rect[2], rect[1] + rect[3]), + (0, 0, 255), + 1, + ) # TODO: log here since not fully implemented - self.imglog.hotmaps[-1] = cv2.normalize(label_img, label_img, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) + self.imglog.hotmaps[-1] = cv2.normalize( + label_img, label_img, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U + ) self.imglog.log(30) - raise NotImplementedError("The connected components method for text detection needs more labels") + raise NotImplementedError( + "The connected components method for text detection needs more labels" + ) # TODO: alternatively use cvBlobsLib # myblobs = CBlobResult(binary_image, mask, 0, True) @@ -2630,21 +3434,33 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") - - self.imglog.dump_hotmap("imglog%s-3hotmap-1char.png" % self.imglog.printable_step, - self.imglog.hotmaps[0]) - self.imglog.dump_hotmap("imglog%s-3hotmap-2text.png" % self.imglog.printable_step, - self.imglog.hotmaps[1]) - - for i in range(2, len(self.imglog.hotmaps)-1): - self.imglog.dump_hotmap("imglog%s-3hotmap-3ocr-%stext-%s.png" % (self.imglog.printable_step, i-1, - self.imglog.similarities[i-2]), - self.imglog.hotmaps[i]) - - similarity = max(self.imglog.similarities) if len(self.imglog.similarities) > 0 else 0.0 - self.imglog.dump_hotmap("imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), - self.imglog.hotmaps[-1]) + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) + + self.imglog.dump_hotmap( + "imglog%s-3hotmap-1char.png" % self.imglog.printable_step, + self.imglog.hotmaps[0], + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-2text.png" % self.imglog.printable_step, + self.imglog.hotmaps[1], + ) + + for i in range(2, len(self.imglog.hotmaps) - 1): + self.imglog.dump_hotmap( + "imglog%s-3hotmap-3ocr-%stext-%s.png" + % (self.imglog.printable_step, i - 1, self.imglog.similarities[i - 2]), + self.imglog.hotmaps[i], + ) + + similarity = ( + max(self.imglog.similarities) if len(self.imglog.similarities) > 0 else 0.0 + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity), + self.imglog.hotmaps[-1], + ) self.imglog.clear() ImageLogger.step += 1 @@ -2678,10 +3494,20 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: FeatureFinder.synchronize(self, reset=False) - def __configure_backend(self, 
backend: str = None, category: str = "tempfeat", - reset: bool = False) -> None: - if category not in ["tempfeat", "template", "feature", "fdetect", "fextract", "fmatch"]: - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + def __configure_backend( + self, backend: str = None, category: str = "tempfeat", reset: bool = False + ) -> None: + if category not in [ + "tempfeat", + "template", + "feature", + "fdetect", + "fextract", + "fmatch", + ]: + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) elif category in ["feature", "fdetect", "fextract", "fmatch"]: FeatureFinder.configure_backend(self, backend, category, reset) return @@ -2694,15 +3520,18 @@ def __configure_backend(self, backend: str = None, category: str = "tempfeat", if backend is None: backend = "mixed" if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) self.params[category] = {} self.params[category]["backend"] = backend self.params[category]["front_similarity"] = CVParameter(0.7, 0.0, 1.0) - def configure_backend(self, backend: str = None, category: str = "tempfeat", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "tempfeat", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -2710,9 +3539,14 @@ def configure_backend(self, backend: str = None, category: str = "tempfeat", """ self.__configure_backend(backend, category, reset) - def __configure(self, template_match: str = None, feature_detect: str = None, - feature_extract: str = None, feature_match: str = None, - reset: bool = True) -> None: + def __configure( + self, + template_match: str = None, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + ) -> None: self.__configure_backend(category="tempfeat", reset=reset) self.__configure_backend(template_match, "template") self.__configure_backend(category="feature") @@ -2720,29 +3554,44 @@ def __configure(self, template_match: str = None, feature_detect: str = None, self.__configure_backend(feature_extract, "fextract") self.__configure_backend(feature_match, "fmatch") - def configure(self, template_match: str = None, feature_detect: str = None, - feature_extract: str = None, feature_match: str = None, - reset: bool = True, **kwargs: dict[str, type]) -> None: + def configure( + self, + template_match: str = None, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + **kwargs: dict[str, type] + ) -> None: """ Custom implementation of the base methods. See base methods for details. """ - self.__configure(template_match, feature_detect, feature_extract, feature_match, reset) - - def synchronize(self, feature_detect: str = None, feature_extract: str = None, - feature_match: str = None, reset: bool = True) -> None: + self.__configure( + template_match, feature_detect, feature_extract, feature_match, reset + ) + + def synchronize( + self, + feature_detect: str = None, + feature_extract: str = None, + feature_match: str = None, + reset: bool = True, + ) -> None: """ Custom implementation of the base method. See base method for details. 
""" Finder.synchronize_backend(self, "tempfeat", reset=reset) - FeatureFinder.synchronize(self, - feature_detect=feature_detect, - feature_extract=feature_extract, - feature_match=feature_match, - reset=False) + FeatureFinder.synchronize( + self, + feature_detect=feature_detect, + feature_extract=feature_extract, + feature_match=feature_match, + reset=False, + ) def find(self, needle: "Image", haystack: "Image") -> "list[Match]": """ @@ -2759,9 +3608,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": # use a different lower similarity for the template matching template_similarity = self.params["tempfeat"]["front_similarity"].value feature_similarity = self.params["find"]["similarity"].value - log.debug("Using tempfeat matching with template similarity %s " - "and feature similarity %s", template_similarity, - feature_similarity) + log.debug( + "Using tempfeat matching with template similarity %s " + "and feature similarity %s", + template_similarity, + feature_similarity, + ) # class-specific dependencies import cv2 @@ -2786,8 +3638,12 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": down = min(haystack.height, up + needle.height) left = upleft.x right = min(haystack.width, left + needle.width) - log.log(9, "Maximum up-down is %s and left-right is %s", - (up, down), (left, right)) + log.log( + 9, + "Maximum up-down is %s and left-right is %s", + (up, down), + (left, right), + ) haystack_region = hgray[up:down, left:right] haystack_region = haystack_region.copy() @@ -2799,21 +3655,32 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": self.imglog.hotmaps.append(hotmap_region) self.imglog.hotmaps.append(hotmap_region) - res = self._project_features(frame_points, ngray, haystack_region, feature_similarity) + res = self._project_features( + frame_points, ngray, haystack_region, feature_similarity + ) # if the feature matching succeeded or is worse than satisfactory template matching - if res is not None or (self.imglog.similarities[-1] > 0.0 - and self.imglog.similarities[-1] < self.imglog.similarities[i] - and self.imglog.similarities[i] > feature_similarity): + if res is not None or ( + self.imglog.similarities[-1] > 0.0 + and self.imglog.similarities[-1] < self.imglog.similarities[i] + and self.imglog.similarities[i] > feature_similarity + ): # take the template matching location rather than the feature one # for stability (they should ultimately be the same) - log.debug("Using template result %s instead of the worse feature result %s", - self.imglog.similarities[i], self.imglog.similarities[-1]) + log.debug( + "Using template result %s instead of the worse feature result %s", + self.imglog.similarities[i], + self.imglog.similarities[-1], + ) location = (left, up) self.imglog.locations[-1] = location - feature_maxima.append([self.imglog.hotmaps[-1], - self.imglog.similarities[-1], - self.imglog.locations[-1]]) + feature_maxima.append( + [ + self.imglog.hotmaps[-1], + self.imglog.similarities[-1], + self.imglog.locations[-1], + ] + ) # stitch back for a better final image logging final_hotmap[up:down, left:right] = hotmap_region @@ -2833,21 +3700,31 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": for i, _ in enumerate(template_maxima): # test the template match also against the actual required similarity if self.imglog.similarities[i] >= feature_similarity: - feature_maxima.append([self.imglog.hotmaps[i], - self.imglog.similarities[i], - self.imglog.locations[i]]) + feature_maxima.append( + [ + 
self.imglog.hotmaps[i], + self.imglog.similarities[i], + self.imglog.locations[i], + ] + ) # release the accumulated logging from subroutines ImageLogger.accumulate_logging = False if len(feature_maxima) == 0: - log.debug("No acceptable match with the given feature similarity %s", - feature_similarity) + log.debug( + "No acceptable match with the given feature similarity %s", + feature_similarity, + ) if len(self.imglog.similarities) > 1: # NOTE: handle cases when the matching failed at the feature stage, i.e. dump # a hotmap for debugging also in this case self.imglog.hotmaps.append(final_hotmap) - self.imglog.similarities.append(self.imglog.similarities[len(template_maxima)]) - self.imglog.locations.append(self.imglog.locations[len(template_maxima)]) + self.imglog.similarities.append( + self.imglog.similarities[len(template_maxima)] + ) + self.imglog.locations.append( + self.imglog.locations[len(template_maxima)] + ) elif len(self.imglog.similarities) == 1: # NOTE: we are only interested in the template hotmap on template failure self.imglog.hotmaps.append(self.imglog.hotmaps[0]) @@ -2856,14 +3733,27 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": matches = [] from .match import Match + maxima = sorted(feature_maxima, key=lambda x: x[1], reverse=True) for maximum in maxima: similarity = maximum[1] x, y = maximum[2] w, h = needle.width, needle.height dx, dy = needle.center_offset.x, needle.center_offset.y - cv2.rectangle(final_hotmap, (x, y), (x+needle.width, y+needle.height), (0, 0, 0), 2) - cv2.rectangle(final_hotmap, (x, y), (x+needle.width, y+needle.height), (0, 0, 255), 1) + cv2.rectangle( + final_hotmap, + (x, y), + (x + needle.width, y + needle.height), + (0, 0, 0), + 2, + ) + cv2.rectangle( + final_hotmap, + (x, y), + (x + needle.width, y + needle.height), + (0, 0, 255), + 1, + ) matches.append(Match(x, y, w, h, dx, dy, similarity)) self.imglog.hotmaps.append(final_hotmap) # log one best match for final hotmap filename @@ -2889,26 +3779,36 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) # knowing how the tempfeat works this estimates # the expected number of cases starting from 1 (i+1) # to make sure the winner is the first alphabetically candidate_num = int(len(self.imglog.similarities) / 2) for i in range(candidate_num): - name = "imglog%s-3hotmap-%stemplate-%s.png" % (self.imglog.printable_step, - i + 1, self.imglog.similarities[i]) + name = "imglog%s-3hotmap-%stemplate-%s.png" % ( + self.imglog.printable_step, + i + 1, + self.imglog.similarities[i], + ) self.imglog.dump_hotmap(name, self.imglog.hotmaps[i]) ii = candidate_num + i - hii = candidate_num + i*4 + 3 - #self.imglog.log_locations(30, [self.imglog.locations[ii]], self.imglog.hotmaps[hii], 4, 255, 0, 0) - name = "imglog%s-3hotmap-%sfeature-%s.png" % (self.imglog.printable_step, - i + 1, self.imglog.similarities[ii]) + hii = candidate_num + i * 4 + 3 + # self.imglog.log_locations(30, [self.imglog.locations[ii]], self.imglog.hotmaps[hii], 4, 255, 0, 0) + name = "imglog%s-3hotmap-%sfeature-%s.png" % ( + self.imglog.printable_step, + i + 1, + self.imglog.similarities[ii], + ) self.imglog.dump_hotmap(name, self.imglog.hotmaps[hii]) if len(self.imglog.similarities) % 2 == 1: - name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, - self.imglog.similarities[-1]) + name = 
"imglog%s-3hotmap-%s.png" % ( + self.imglog.printable_step, + self.imglog.similarities[-1], + ) self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1]) self.imglog.clear() @@ -2926,8 +3826,12 @@ class DeepFinder(Finder): _cache = {} - def __init__(self, classifier_datapath: str = ".", configure: bool = True, - synchronize: bool = True) -> None: + def __init__( + self, + classifier_datapath: str = ".", + configure: bool = True, + synchronize: bool = True, + ) -> None: """Build a CV backend using OpenCV's text matching options.""" super(DeepFinder, self).__init__(configure=False, synchronize=False) @@ -2944,22 +3848,27 @@ def __init__(self, classifier_datapath: str = ".", configure: bool = True, if synchronize: self.__synchronize_backend(reset=False) - def __configure_backend(self, backend: str = None, category: str = "deep", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "deep", reset: bool = False + ) -> None: """ Custom implementation of the base method. See base method for details. """ if category != "deep": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(DeepFinder, self).configure_backend("deep", reset=True) if backend is None: backend = GlobalConfig.deep_learn_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) self.params[category] = {} self.params[category]["backend"] = backend @@ -2973,8 +3882,9 @@ def __configure_backend(self, backend: str = None, category: str = "deep", # file to load pre-trained model weights from self.params[category]["model"] = CVParameter("") - def configure_backend(self, backend: str = None, category: str = "deep", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "deep", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -2982,14 +3892,19 @@ def configure_backend(self, backend: str = None, category: str = "deep", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "deep", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "deep", reset: bool = False + ) -> None: if category != "deep": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(DeepFinder, self).synchronize_backend("deep", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) backend = self.params[category]["backend"] # reuse or cache a unique model depending on arch and checkpoint @@ -3010,12 +3925,14 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep", else: # only models pretrained on the COCO dataset are available is_pretrained = model_checkpoint == "" and model_classes == 91 - model = models.__dict__[model_arch](pretrained=is_pretrained, - num_classes=model_classes) + model = models.__dict__[model_arch]( + pretrained=is_pretrained, num_classes=model_classes + ) # load .pth or .pkl data file if pretrained model is available if model_checkpoint: - model.load_state_dict(torch.load(model_checkpoint, - map_location="cpu")) + model.load_state_dict( + torch.load(model_checkpoint, map_location="cpu") + ) self._cache[model_id] = model device_opt = self.params[category]["device"].value @@ -3031,19 +3948,20 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep", elif backend == "tensorflow": # class-specific dependencies import tensorflow as tf + tf.keras.backend.clear_session() # TODO: current TensorFlow model zoo/garden API is too unstable from research.object_detection.utils import config_util from research.object_detection.builders import model_builder # TODO: the model ARCH and CHECKPOINT need extra path flexibility - #tf_models_dir = 'models/research/object_detection' - #model_arch = os.path.join(tf_models_dir, 'configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config') - #model_checkpoint = os.path.join(tf_models_dir, 'test_data/checkpoint/ckpt-0') + # tf_models_dir = 'models/research/object_detection' + # model_arch = os.path.join(tf_models_dir, 'configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config') + # model_checkpoint = os.path.join(tf_models_dir, 'test_data/checkpoint/ckpt-0') # load pipeline config and build a detection model configs = config_util.get_configs_from_pipeline_file(model_arch) - model_config = configs['model'] + model_config = configs["model"] self.net = model_builder.build(model_config=model_config, is_training=False) ckpt = tf.compat.v2.train.Checkpoint(model=self.net) @@ -3052,8 +3970,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "deep", else: raise ValueError("Invalid DL backend '%s'" % backend) - def synchronize_backend(self, backend: str = None, category: str = "deep", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "deep", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -3083,10 +4002,12 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]": backend = self.params["deep"]["backend"] if backend == "tensorflow": - raise NotImplementedError("The TensorFlow model zoo/garden libary " "is too unstable at present") + raise NotImplementedError( + "The TensorFlow model zoo/garden library " "is too unstable at present" + ) assert backend == "pytorch", "Only PyTorch model zoo/garden is supported" import torch + classes: Callable[[Any], str] = None if needle.data_file is not None: with open(needle.data_file, "rt") as f: @@ -3101,6 +4022,7 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]": # convert haystack data to tensor variable from torchvision import transforms + img = haystack.pil_image transform = transforms.Compose([transforms.ToTensor()]) img = transform(img) @@ -3113,19 +4035,25 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]": matches = [] from .match import Match - for i in range(len(pred[0]['labels'])): - label = classes(pred[0]['labels'][i].cpu().item()) - score = pred[0]['scores'][i].cpu().item() - x, y, w, h = list(pred[0]['boxes'][i].cpu().numpy()) - rect = (int(x), int(y), int(x+w), int(y+h)) + + for i in range(len(pred[0]["labels"])): + label = classes(pred[0]["labels"][i].cpu().item()) + score = pred[0]["scores"][i].cpu().item() + x, y, w, h = list(pred[0]["boxes"][i].cpu().numpy()) + rect = (int(x), int(y), int(x + w), int(y + h)) from PIL import ImageDraw + draw = ImageDraw.Draw(full_hotmap) draw.rectangle(rect, outline=(255, 0, 0)) draw.text((rect[0], rect[1]), label, fill=(255, 0, 0, 0)) if score < similarity: - logging.debug("Found %s has a low confidence score %s<%s, skipping", - label, score, similarity) + logging.debug( + "Found %s has a low confidence score %s<%s, skipping", + label, + score, + similarity, + ) continue draw = ImageDraw.Draw(filtered_hotmap) draw.rectangle(rect, outline=(0, 255, 0)) @@ -3133,8 +4061,9 @@ def find(self, needle: "Pattern", haystack: "Image") -> "list[Match]": if label != needle_class: logging.debug("Found %s is not %s, skipping", label, needle_class) continue - logging.debug("Found %s with sufficient confidence %s at (%s, %s)", - label, score, x, y) + logging.debug( + "Found %s with sufficient confidence %s at (%s, %s)", label, score, x, y + ) draw = ImageDraw.Draw(final_hotmap) draw.rectangle(rect, outline=(0, 0, 255)) @@ -3164,14 +4093,22 @@ def log(self, lvl: int) -> None: return # no hotmaps to log elif len(self.imglog.hotmaps) == 0: - raise MissingHotmapError("No matching was performed in order to be image logged") - - self.imglog.dump_hotmap("imglog%s-3hotmap-1full.png" % self.imglog.printable_step, - self.imglog.hotmaps[0]) - self.imglog.dump_hotmap("imglog%s-3hotmap-2filtered.png" % self.imglog.printable_step, - self.imglog.hotmaps[1]) - - similarity = self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + raise MissingHotmapError( + "No matching was performed in order to be image logged" + ) + + self.imglog.dump_hotmap( + "imglog%s-3hotmap-1full.png" % self.imglog.printable_step, + self.imglog.hotmaps[0], + ) + self.imglog.dump_hotmap( + "imglog%s-3hotmap-2filtered.png" % self.imglog.printable_step, + self.imglog.hotmaps[1], + ) + + similarity = ( + self.imglog.similarities[-1] if len(self.imglog.similarities) > 0 else 0.0 + ) name = "imglog%s-3hotmap-%s.png" % (self.imglog.printable_step, similarity) self.imglog.dump_hotmap(name, self.imglog.hotmaps[-1]) @@ -3195,7 +4132,13 @@ def __init__(self, configure: 
bool = True, synchronize: bool = True) -> None: # available and currently fully compatible methods self.categories["hybrid"] = "hybrid_methods" - self.algorithms["hybrid_methods"] = ("autopy", "contour", "template", "feature", "tempfeat") + self.algorithms["hybrid_methods"] = ( + "autopy", + "contour", + "template", + "feature", + "tempfeat", + ) # other attributes self.matcher = None @@ -3206,24 +4149,30 @@ def __init__(self, configure: bool = True, synchronize: bool = True) -> None: if synchronize: self.__synchronize_backend(reset=False) - def __configure_backend(self, backend: str = None, category: str = "hybrid", - reset: bool = False) -> None: + def __configure_backend( + self, backend: str = None, category: str = "hybrid", reset: bool = False + ) -> None: if category != "hybrid": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: # backends are the same as the ones for the base class super(HybridFinder, self).configure_backend(backend=backend, reset=True) if backend is None: backend = GlobalConfig.hybrid_match_backend if backend not in self.algorithms[self.categories[category]]: - raise UnsupportedBackendError("Backend '%s' is not among the supported ones: " - "%s" % (backend, self.algorithms[self.categories[category]])) + raise UnsupportedBackendError( + "Backend '%s' is not among the supported ones: " + "%s" % (backend, self.algorithms[self.categories[category]]) + ) self.params[category] = {} self.params[category]["backend"] = backend - def configure_backend(self, backend: str = None, category: str = "hybrid", - reset: bool = False) -> None: + def configure_backend( + self, backend: str = None, category: str = "hybrid", reset: bool = False + ) -> None: """ Custom implementation of the base method. @@ -3231,14 +4180,19 @@ def configure_backend(self, backend: str = None, category: str = "hybrid", """ self.__configure_backend(backend, category, reset) - def __synchronize_backend(self, backend: str = None, category: str = "hybrid", - reset: bool = False) -> None: + def __synchronize_backend( + self, backend: str = None, category: str = "hybrid", reset: bool = False + ) -> None: if category != "hybrid": - raise UnsupportedBackendError("Backend category '%s' is not supported" % category) + raise UnsupportedBackendError( + "Backend category '%s' is not supported" % category + ) if reset: super(HybridFinder, self).synchronize_backend("hybrid", reset=True) if backend is not None and self.params[category]["backend"] != backend: - raise UninitializedBackendError("Backend '%s' has not been configured yet" % backend) + raise UninitializedBackendError( + "Backend '%s' has not been configured yet" % backend + ) backend = self.params[category]["backend"] # default matcher in case of a simple chain without own matching config @@ -3259,8 +4213,9 @@ def __synchronize_backend(self, backend: str = None, category: str = "hybrid", elif backend == "deep": self.matcher = DeepFinder() - def synchronize_backend(self, backend: str = None, category: str = "hybrid", - reset: bool = False) -> None: + def synchronize_backend( + self, backend: str = None, category: str = "hybrid", reset: bool = False + ) -> None: """ Custom implementation of the base method. 
@@ -3283,7 +4238,9 @@ def find(self, needle: "Image", haystack: "Image") -> "list[Match]": for step_needle in needle: - if step_needle.use_own_settings and not isinstance(step_needle.match_settings, HybridFinder): + if step_needle.use_own_settings and not isinstance( + step_needle.match_settings, HybridFinder + ): matcher = step_needle.match_settings else: matcher = self.matcher diff --git a/guibot/guibot.py b/guibot/guibot.py index 47154436..b4da4005 100644 --- a/guibot/guibot.py +++ b/guibot/guibot.py @@ -35,7 +35,7 @@ from .finder import Finder -log = logging.getLogger('guibot') +log = logging.getLogger("guibot") log.addHandler(logging.NullHandler()) diff --git a/guibot/guibot_proxy.py b/guibot/guibot_proxy.py index b30c0ca6..ab1a1e0a 100644 --- a/guibot/guibot_proxy.py +++ b/guibot/guibot_proxy.py @@ -45,7 +45,9 @@ from .controller import Controller -def serialize_custom_error(class_obj: type) -> dict[str, "str | getset_descriptor | dictproxy"]: +def serialize_custom_error( + class_obj: type, +) -> dict[str, "str | getset_descriptor | dictproxy"]: """ Serialization method for the :py:class:`errors.UnsupportedBackendError` which was chosen just as a sample. @@ -70,7 +72,9 @@ def register_exception_serialization() -> None: for it with some extra setup steps and functions below. """ for exception in [errors.UnsupportedBackendError]: - pyro.util.SerializerBase.register_class_to_dict(exception, serialize_custom_error) + pyro.util.SerializerBase.register_class_to_dict( + exception, serialize_custom_error + ) class GuiBotProxy(GuiBot): @@ -159,7 +163,7 @@ def click(self, *args: tuple[type, ...], **kwargs: dict[str, type]) -> str: """See :py:class:`guibot.guibot.GuiBot` and its inherited :py:class:`guibot.region.Region` for details.""" return self._proxify(super(GuiBotProxy, self).click(*args, **kwargs)) - def right_click(self,*args: tuple[type, ...], **kwargs: dict[str, type]) -> str: + def right_click(self, *args: tuple[type, ...], **kwargs: dict[str, type]) -> str: """See :py:class:`guibot.guibot.GuiBot` and its inherited :py:class:`guibot.region.Region` for details.""" return self._proxify(super(GuiBotProxy, self).right_click(*args, **kwargs)) diff --git a/guibot/guibot_simple.py b/guibot/guibot_simple.py index 361ab0e2..42678460 100644 --- a/guibot/guibot_simple.py +++ b/guibot/guibot_simple.py @@ -41,7 +41,7 @@ # accessible attributes of this module guibot = None last_match = None -buttons = namedtuple('buttons', ["mouse", "key", "mod"]) +buttons = namedtuple("buttons", ["mouse", "key", "mod"]) def initialize() -> None: @@ -60,7 +60,9 @@ def initialize() -> None: def check_initialized() -> None: """Make sure the simple API is initialized.""" if guibot is None: - raise AssertionError("Guibot module not initialized - run initialize() before using the simple API") + raise AssertionError( + "Guibot module not initialized - run initialize() before using the simple API" + ) def add_path(*args: tuple[type, ...], **kwargs: dict[str, type]) -> None: diff --git a/guibot/imagelogger.py b/guibot/imagelogger.py index 9e460ac6..2a9f4702 100644 --- a/guibot/imagelogger.py +++ b/guibot/imagelogger.py @@ -82,6 +82,7 @@ def get_printable_step(self) -> str: :returns: step number prepended with zeroes to obtain a fixed length enumeration """ return ("%0" + str(ImageLogger.step_width) + "d") % ImageLogger.step + printable_step = property(fget=get_printable_step) def debug(self) -> None: @@ -119,16 +120,15 @@ def dump_matched_images(self) -> None: shutil.rmtree(ImageLogger.logging_destination) 
os.mkdir(ImageLogger.logging_destination) - needle_name = "imglog%s-1needle-%s" % (self.printable_step, - str(self.needle)) - needle_path = os.path.join(ImageLogger.logging_destination, - needle_name) + needle_name = "imglog%s-1needle-%s" % (self.printable_step, str(self.needle)) + needle_path = os.path.join(ImageLogger.logging_destination, needle_name) self.needle.save(needle_path) - haystack_name = "imglog%s-2haystack-%s" % (self.printable_step, - str(self.haystack)) - haystack_path = os.path.join(ImageLogger.logging_destination, - haystack_name) + haystack_name = "imglog%s-2haystack-%s" % ( + self.printable_step, + str(self.haystack), + ) + haystack_path = os.path.join(ImageLogger.logging_destination, haystack_name) self.haystack.save(haystack_path) def dump_hotmap(self, name: str, hotmap: PIL.Image.Image | numpy.ndarray) -> None: @@ -150,8 +150,8 @@ def dump_hotmap(self, name: str, hotmap: PIL.Image.Image | numpy.ndarray) -> Non # numpy or other array pil_image = PIL.Image.fromarray(hotmap) # NOTE: some modes cannot be saved unless converted to RGB - if pil_image.mode != 'RGB': - pil_image = pil_image.convert('RGB') + if pil_image.mode != "RGB": + pil_image = pil_image.convert("RGB") pil_image.save(path, compress_level=GlobalConfig.image_quality) def clear(self) -> None: diff --git a/guibot/inputmap.py b/guibot/inputmap.py index bb98b42b..8e7e5c8a 100644 --- a/guibot/inputmap.py +++ b/guibot/inputmap.py @@ -120,75 +120,77 @@ def to_string(self, key: str) -> str: """ if key is None: raise ValueError("The key %s does not exist in the current key map" % key) - return {self.ENTER: "Enter", - self.TAB: "Tab", - self.ESC: "Esc", - self.BACKSPACE: "Backspace", - self.DELETE: "Delete", - self.INSERT: "Insert", - self.CTRL: "Ctrl", - self.ALT: "Alt", - self.SHIFT: "Shift", - self.META: "Meta", - self.RCTRL: "RightControl", - self.RALT: "RightAlt", - self.RSHIFT: "RightShift", - self.RMETA: "RightMeta", - self.F1: "F1", - self.F2: "F2", - self.F3: "F3", - self.F4: "F4", - self.F5: "F5", - self.F6: "F6", - self.F7: "F7", - self.F8: "F8", - self.F9: "F9", - self.F10: "F10", - self.F11: "F11", - self.F12: "F12", - self.F13: "F13", - self.F14: "F14", - self.F15: "F15", - self.F16: "F16", - self.F17: "F17", - self.F18: "F18", - self.F19: "F19", - self.F20: "F20", - self.HOME: "Home", - self.END: "End", - self.LEFT: "Left", - self.RIGHT: "Right", - self.UP: "Up", - self.DOWN: "Down", - self.PAGE_DOWN: "Page Down", - self.PAGE_UP: "Page Up", - self.CAPS_LOCK: "Caps Lock", - self.PRINTSCREEN: "Print Screen", - self.PAUSE: "Pause", - self.SCROLL_LOCK: "Scroll Lock", - self.NUM_LOCK: "Num Lock", - self.SYS_REQ: "Sys Req", - self.SUPER: "Super", - self.RSUPER: "RightSuper", - self.HYPER: "Hyper", - self.RHYPER: "RightHyper", - self.MENU: "Menu", - self.KP0: "KeyPad Number 0", - self.KP1: "KeyPad Number 1", - self.KP2: "KeyPad Number 2", - self.KP3: "KeyPad Number 3", - self.KP4: "KeyPad Number 4", - self.KP5: "KeyPad Number 5", - self.KP6: "KeyPad Number 6", - self.KP7: "KeyPad Number 7", - self.KP8: "KeyPad Number 8", - self.KP9: "KeyPad Number 9", - self.KP_ADD: "KeyPad Add", - self.KP_DECIMAL: "KeyPad Decimal", - self.KP_DIVIDE: "KeyPad Divide", - self.KP_ENTER: "KeyPad Enter", - self.KP_MULTIPLY: "KeyPad Multiply", - self.KP_SUBTRACT: "KeyPad Subtract"}[key] + return { + self.ENTER: "Enter", + self.TAB: "Tab", + self.ESC: "Esc", + self.BACKSPACE: "Backspace", + self.DELETE: "Delete", + self.INSERT: "Insert", + self.CTRL: "Ctrl", + self.ALT: "Alt", + self.SHIFT: "Shift", + self.META: "Meta", + 
self.RCTRL: "RightControl", + self.RALT: "RightAlt", + self.RSHIFT: "RightShift", + self.RMETA: "RightMeta", + self.F1: "F1", + self.F2: "F2", + self.F3: "F3", + self.F4: "F4", + self.F5: "F5", + self.F6: "F6", + self.F7: "F7", + self.F8: "F8", + self.F9: "F9", + self.F10: "F10", + self.F11: "F11", + self.F12: "F12", + self.F13: "F13", + self.F14: "F14", + self.F15: "F15", + self.F16: "F16", + self.F17: "F17", + self.F18: "F18", + self.F19: "F19", + self.F20: "F20", + self.HOME: "Home", + self.END: "End", + self.LEFT: "Left", + self.RIGHT: "Right", + self.UP: "Up", + self.DOWN: "Down", + self.PAGE_DOWN: "Page Down", + self.PAGE_UP: "Page Up", + self.CAPS_LOCK: "Caps Lock", + self.PRINTSCREEN: "Print Screen", + self.PAUSE: "Pause", + self.SCROLL_LOCK: "Scroll Lock", + self.NUM_LOCK: "Num Lock", + self.SYS_REQ: "Sys Req", + self.SUPER: "Super", + self.RSUPER: "RightSuper", + self.HYPER: "Hyper", + self.RHYPER: "RightHyper", + self.MENU: "Menu", + self.KP0: "KeyPad Number 0", + self.KP1: "KeyPad Number 1", + self.KP2: "KeyPad Number 2", + self.KP3: "KeyPad Number 3", + self.KP4: "KeyPad Number 4", + self.KP5: "KeyPad Number 5", + self.KP6: "KeyPad Number 6", + self.KP7: "KeyPad Number 7", + self.KP8: "KeyPad Number 8", + self.KP9: "KeyPad Number 9", + self.KP_ADD: "KeyPad Add", + self.KP_DECIMAL: "KeyPad Decimal", + self.KP_DIVIDE: "KeyPad Divide", + self.KP_ENTER: "KeyPad Enter", + self.KP_MULTIPLY: "KeyPad Multiply", + self.KP_SUBTRACT: "KeyPad Subtract", + }[key] class AutoPyKey(Key): @@ -285,58 +287,58 @@ def __init__(self) -> None: """Build an instance containing the key map for the xdotool backend.""" super().__init__() - self.ENTER = 'Return' # also 'enter' - self.TAB = 'Tab' - self.ESC = 'Escape' - self.BACKSPACE = 'BackSpace' - self.DELETE = 'Delete' - self.INSERT = 'Insert' - - self.CTRL = 'ctrl' # special handling - self.ALT = 'alt' # special handling - self.SHIFT = 'shift' # special handling - self.META = 'meta' # special handling - self.RCTRL = 'CtrlR' - self.RALT = 'AltR' - self.RSHIFT = 'ShiftR' - self.RMETA = 'MetaR' - - self.F1 = 'F1' - self.F2 = 'F2' - self.F3 = 'F3' - self.F4 = 'F4' - self.F5 = 'F5' - self.F6 = 'F6' - self.F7 = 'F7' - self.F8 = 'F8' - self.F9 = 'F9' - self.F10 = 'F10' - self.F11 = 'F11' - self.F12 = 'F12' - self.F13 = 'F13' - self.F14 = 'F14' - self.F15 = 'F15' - self.F16 = 'F16' - self.F17 = 'F17' - self.F18 = 'F18' - self.F19 = 'F19' - self.F20 = 'F20' - - self.HOME = 'Home' - self.END = 'End' - self.LEFT = 'Left' - self.RIGHT = 'Right' - self.UP = 'Up' - self.DOWN = 'Down' - self.PAGE_DOWN = 'Page_Down' - self.PAGE_UP = 'Page_Up' - - self.CAPS_LOCK = 'Caps_Lock' + self.ENTER = "Return" # also 'enter' + self.TAB = "Tab" + self.ESC = "Escape" + self.BACKSPACE = "BackSpace" + self.DELETE = "Delete" + self.INSERT = "Insert" + + self.CTRL = "ctrl" # special handling + self.ALT = "alt" # special handling + self.SHIFT = "shift" # special handling + self.META = "meta" # special handling + self.RCTRL = "CtrlR" + self.RALT = "AltR" + self.RSHIFT = "ShiftR" + self.RMETA = "MetaR" + + self.F1 = "F1" + self.F2 = "F2" + self.F3 = "F3" + self.F4 = "F4" + self.F5 = "F5" + self.F6 = "F6" + self.F7 = "F7" + self.F8 = "F8" + self.F9 = "F9" + self.F10 = "F10" + self.F11 = "F11" + self.F12 = "F12" + self.F13 = "F13" + self.F14 = "F14" + self.F15 = "F15" + self.F16 = "F16" + self.F17 = "F17" + self.F18 = "F18" + self.F19 = "F19" + self.F20 = "F20" + + self.HOME = "Home" + self.END = "End" + self.LEFT = "Left" + self.RIGHT = "Right" + self.UP = "Up" + self.DOWN = "Down" + 
self.PAGE_DOWN = "Page_Down" + self.PAGE_UP = "Page_Up" + + self.CAPS_LOCK = "Caps_Lock" # TODO: 'print screen' is not available self.PRINTSCREEN = None - self.PAUSE = 'Pause' - self.SCROLL_LOCK = 'Scroll_Lock' - self.NUM_LOCK = 'Num_Lock' + self.PAUSE = "Pause" + self.SCROLL_LOCK = "Scroll_Lock" + self.NUM_LOCK = "Num_Lock" # TODO: the following are not available self.SYS_REQ = None self.SUPER = None @@ -346,21 +348,21 @@ def __init__(self) -> None: # TODO: 'menu' is not available self.MENU = None - self.KP0 = 'KP_0' - self.KP1 = 'KP_1' - self.KP2 = 'KP_2' - self.KP3 = 'KP_3' - self.KP4 = 'KP_4' - self.KP5 = 'KP_5' - self.KP6 = 'KP_6' - self.KP7 = 'KP_7' - self.KP8 = 'KP_8' - self.KP9 = 'KP_9' - self.KP_ENTER = 'KP_Enter' - self.KP_DIVIDE = 'KP_Divide' - self.KP_MULTIPLY = 'KP_Multiply' - self.KP_SUBTRACT = 'KP_Subtract' - self.KP_ADD = 'KP_Add' + self.KP0 = "KP_0" + self.KP1 = "KP_1" + self.KP2 = "KP_2" + self.KP3 = "KP_3" + self.KP4 = "KP_4" + self.KP5 = "KP_5" + self.KP6 = "KP_6" + self.KP7 = "KP_7" + self.KP8 = "KP_8" + self.KP9 = "KP_9" + self.KP_ENTER = "KP_Enter" + self.KP_DIVIDE = "KP_Divide" + self.KP_MULTIPLY = "KP_Multiply" + self.KP_SUBTRACT = "KP_Subtract" + self.KP_ADD = "KP_Add" self.KP_DECIMAL = None @@ -373,77 +375,77 @@ def __init__(self) -> None: # TODO: it would be preferable to translate directly to RBF like # 'ENTER = rfb.KEY_Return' but this is internal for the vncdotool - self.ENTER = 'return' # also 'enter' - self.TAB = 'tab' - self.ESC = 'esc' - self.BACKSPACE = 'bsp' - self.DELETE = 'del' # also 'delete' - self.INSERT = 'ins' - - self.CTRL = 'ctrl' # also 'lctrl' - self.ALT = 'alt' # also 'lalt' - self.SHIFT = 'shift' # also 'lshift' - self.META = 'meta' # also 'lmeta' - self.RCTRL = 'rctrl' - self.RALT = 'ralt' - self.RSHIFT = 'rshift' - self.RMETA = 'rmeta' - - self.F1 = 'f1' - self.F2 = 'f2' - self.F3 = 'f3' - self.F4 = 'f4' - self.F5 = 'f5' - self.F6 = 'f6' - self.F7 = 'f7' - self.F8 = 'f8' - self.F9 = 'f9' - self.F10 = 'f10' - self.F11 = 'f11' - self.F12 = 'f12' - self.F13 = 'f13' - self.F14 = 'f14' - self.F15 = 'f15' - self.F16 = 'f16' - self.F17 = 'f17' - self.F18 = 'f18' - self.F19 = 'f19' - self.F20 = 'f20' - - self.HOME = 'home' - self.END = 'end' - self.LEFT = 'left' - self.RIGHT = 'right' - self.UP = 'up' - self.DOWN = 'down' - self.PAGE_DOWN = 'pgdn' - self.PAGE_UP = 'pgup' - - self.CAPS_LOCK = 'caplk' + self.ENTER = "return" # also 'enter' + self.TAB = "tab" + self.ESC = "esc" + self.BACKSPACE = "bsp" + self.DELETE = "del" # also 'delete' + self.INSERT = "ins" + + self.CTRL = "ctrl" # also 'lctrl' + self.ALT = "alt" # also 'lalt' + self.SHIFT = "shift" # also 'lshift' + self.META = "meta" # also 'lmeta' + self.RCTRL = "rctrl" + self.RALT = "ralt" + self.RSHIFT = "rshift" + self.RMETA = "rmeta" + + self.F1 = "f1" + self.F2 = "f2" + self.F3 = "f3" + self.F4 = "f4" + self.F5 = "f5" + self.F6 = "f6" + self.F7 = "f7" + self.F8 = "f8" + self.F9 = "f9" + self.F10 = "f10" + self.F11 = "f11" + self.F12 = "f12" + self.F13 = "f13" + self.F14 = "f14" + self.F15 = "f15" + self.F16 = "f16" + self.F17 = "f17" + self.F18 = "f18" + self.F19 = "f19" + self.F20 = "f20" + + self.HOME = "home" + self.END = "end" + self.LEFT = "left" + self.RIGHT = "right" + self.UP = "up" + self.DOWN = "down" + self.PAGE_DOWN = "pgdn" + self.PAGE_UP = "pgup" + + self.CAPS_LOCK = "caplk" # TODO: 'print screen' is not available self.PRINTSCREEN = None - self.PAUSE = 'pause' - self.SCROLL_LOCK = 'scrlk' - self.NUM_LOCK = 'numlk' - self.SYS_REQ = 'sysrq' - self.SUPER = 'super' # also 
'lsuper' - self.RSUPER = 'rsuper' - self.HYPER = 'hyper' # also 'lhyper' - self.RHYPER = 'rhyper' + self.PAUSE = "pause" + self.SCROLL_LOCK = "scrlk" + self.NUM_LOCK = "numlk" + self.SYS_REQ = "sysrq" + self.SUPER = "super" # also 'lsuper' + self.RSUPER = "rsuper" + self.HYPER = "hyper" # also 'lhyper' + self.RHYPER = "rhyper" # TODO: 'menu' is not available self.MENU = None - self.KP0 = 'kp0' - self.KP1 = 'kp1' - self.KP2 = 'kp2' - self.KP3 = 'kp3' - self.KP4 = 'kp4' - self.KP5 = 'kp5' - self.KP6 = 'kp6' - self.KP7 = 'kp7' - self.KP8 = 'kp8' - self.KP9 = 'kp9' - self.KP_ENTER = 'kpenter' + self.KP0 = "kp0" + self.KP1 = "kp1" + self.KP2 = "kp2" + self.KP3 = "kp3" + self.KP4 = "kp4" + self.KP5 = "kp5" + self.KP6 = "kp6" + self.KP7 = "kp7" + self.KP8 = "kp8" + self.KP9 = "kp9" + self.KP_ENTER = "kpenter" # TODO: these are not available self.KP_DIVIDE = None self.KP_MULTIPLY = None @@ -461,59 +463,59 @@ def __init__(self) -> None: # TODO: it would be preferable to translate directly to RBF like # 'ENTER = rfb.KEY_Return' but this is internal for the vncdotool - self.ENTER = 'return' # also 'enter' - self.TAB = 'tab' - self.ESC = 'escape' # also 'esc' - self.BACKSPACE = 'backspace' - self.DELETE = 'delete' # also 'del' - self.INSERT = 'insert' - - self.CTRL = 'ctrl' # also 'lctrl' - self.ALT = 'alt' # also 'lalt' - self.SHIFT = 'shift' # also 'lshift' + self.ENTER = "return" # also 'enter' + self.TAB = "tab" + self.ESC = "escape" # also 'esc' + self.BACKSPACE = "backspace" + self.DELETE = "delete" # also 'del' + self.INSERT = "insert" + + self.CTRL = "ctrl" # also 'lctrl' + self.ALT = "alt" # also 'lalt' + self.SHIFT = "shift" # also 'lshift' # TODO: 'meta key' is not available self.META = None - self.RCTRL = 'ctrlright' - self.RALT = 'altright' - self.RSHIFT = 'shiftright' + self.RCTRL = "ctrlright" + self.RALT = "altright" + self.RSHIFT = "shiftright" # TODO: 'meta key' is not available self.RMETA = None - self.F1 = 'f1' - self.F2 = 'f2' - self.F3 = 'f3' - self.F4 = 'f4' - self.F5 = 'f5' - self.F6 = 'f6' - self.F7 = 'f7' - self.F8 = 'f8' - self.F9 = 'f9' - self.F10 = 'f10' - self.F11 = 'f11' - self.F12 = 'f12' - self.F13 = 'f13' - self.F14 = 'f14' - self.F15 = 'f15' - self.F16 = 'f16' - self.F17 = 'f17' - self.F18 = 'f18' - self.F19 = 'f19' - self.F20 = 'f20' - - self.HOME = 'home' - self.END = 'end' - self.LEFT = 'left' - self.RIGHT = 'right' - self.UP = 'up' - self.DOWN = 'down' - self.PAGE_DOWN = 'pgdn' - self.PAGE_UP = 'pgup' - - self.CAPS_LOCK = 'capslock' - self.PRINTSCREEN = 'printscreen' - self.PAUSE = 'pause' - self.SCROLL_LOCK = 'scrolllock' - self.NUM_LOCK = 'numlock' + self.F1 = "f1" + self.F2 = "f2" + self.F3 = "f3" + self.F4 = "f4" + self.F5 = "f5" + self.F6 = "f6" + self.F7 = "f7" + self.F8 = "f8" + self.F9 = "f9" + self.F10 = "f10" + self.F11 = "f11" + self.F12 = "f12" + self.F13 = "f13" + self.F14 = "f14" + self.F15 = "f15" + self.F16 = "f16" + self.F17 = "f17" + self.F18 = "f18" + self.F19 = "f19" + self.F20 = "f20" + + self.HOME = "home" + self.END = "end" + self.LEFT = "left" + self.RIGHT = "right" + self.UP = "up" + self.DOWN = "down" + self.PAGE_DOWN = "pgdn" + self.PAGE_UP = "pgup" + + self.CAPS_LOCK = "capslock" + self.PRINTSCREEN = "printscreen" + self.PAUSE = "pause" + self.SCROLL_LOCK = "scrolllock" + self.NUM_LOCK = "numlock" # TODO: these are not available self.SYS_REQ = None self.SUPER = None @@ -562,12 +564,16 @@ def to_string(self, key: str) -> str: :raises: :py:class:`ValueError` if `key` is not found in the current modifier map """ if key is None: - raise 
ValueError("The modifier key %s does not exist in the current modifier map" % key) - return {self.MOD_NONE: "None", - self.MOD_CTRL: "Ctrl", - self.MOD_ALT: "Alt", - self.MOD_SHIFT: "Shift", - self.MOD_META: "Meta"}[key] + raise ValueError( + "The modifier key %s does not exist in the current modifier map" % key + ) + return { + self.MOD_NONE: "None", + self.MOD_CTRL: "Ctrl", + self.MOD_ALT: "Alt", + self.MOD_SHIFT: "Shift", + self.MOD_META: "Meta", + }[key] class AutoPyKeyModifier(KeyModifier): @@ -596,10 +602,10 @@ def __init__(self) -> None: # TODO: 'none' is not available self.MOD_NONE = None - self.MOD_CTRL = 'ctrl' - self.MOD_ALT = 'alt' - self.MOD_SHIFT = 'shift' - self.MOD_META = 'meta' + self.MOD_CTRL = "ctrl" + self.MOD_ALT = "alt" + self.MOD_SHIFT = "shift" + self.MOD_META = "meta" class VNCDoToolKeyModifier(KeyModifier): @@ -611,10 +617,10 @@ def __init__(self) -> None: # TODO: 'none' is not available self.MOD_NONE = None - self.MOD_CTRL = 'ctrl' - self.MOD_ALT = 'alt' - self.MOD_SHIFT = 'shift' - self.MOD_META = 'meta' + self.MOD_CTRL = "ctrl" + self.MOD_ALT = "alt" + self.MOD_SHIFT = "shift" + self.MOD_META = "meta" class PyAutoGUIKeyModifier(KeyModifier): @@ -626,9 +632,9 @@ def __init__(self) -> None: # TODO: 'none' is not available self.MOD_NONE = None - self.MOD_CTRL = 'ctrl' - self.MOD_ALT = 'alt' - self.MOD_SHIFT = 'shift' + self.MOD_CTRL = "ctrl" + self.MOD_ALT = "alt" + self.MOD_SHIFT = "shift" # TODO: 'meta' is not available self.MOD_META = None @@ -653,9 +659,11 @@ def to_string(self, key: str) -> str: """ if key is None: raise ValueError("The key %s does not exist in the current mouse map" % key) - return {self.LEFT_BUTTON: "MouseLeft", - self.RIGHT_BUTTON: "MouseRight", - self.CENTER_BUTTON: "MouseCenter"}[key] + return { + self.LEFT_BUTTON: "MouseLeft", + self.RIGHT_BUTTON: "MouseRight", + self.CENTER_BUTTON: "MouseCenter", + }[key] class AutoPyMouseButton(MouseButton): @@ -703,6 +711,6 @@ def __init__(self) -> None: """Build an instance containing the mouse button map for the PyAutoGUI backend.""" super().__init__() - self.LEFT_BUTTON = 'left' - self.RIGHT_BUTTON = 'right' - self.CENTER_BUTTON = 'middle' + self.LEFT_BUTTON = "left" + self.RIGHT_BUTTON = "right" + self.CENTER_BUTTON = "middle" diff --git a/guibot/location.py b/guibot/location.py index d6668e4b..be75fb30 100644 --- a/guibot/location.py +++ b/guibot/location.py @@ -53,6 +53,7 @@ def get_x(self) -> int: :returns: x coordinate of the location """ return self._xpos + x = property(fget=get_x) def get_y(self) -> int: @@ -62,4 +63,5 @@ def get_y(self) -> int: :returns: y coordinate of the location """ return self._ypos + y = property(fget=get_y) diff --git a/guibot/match.py b/guibot/match.py index 26fc930f..1864fe4e 100644 --- a/guibot/match.py +++ b/guibot/match.py @@ -37,9 +37,18 @@ class Match(Region): of matches on a screen. """ - def __init__(self, xpos: int, ypos: int, width: int, height: int, - dx: int = 0, dy: int = 0, similarity: float = 0.0, - dc: Controller = None, cv: "Finder" = None) -> None: + def __init__( + self, + xpos: int, + ypos: int, + width: int, + height: int, + dx: int = 0, + dy: int = 0, + similarity: float = 0.0, + dc: Controller = None, + cv: "Finder" = None, + ) -> None: """ Build a match object. 
@@ -73,6 +82,7 @@ def set_x(self, value: int) -> None: :param value: x coordinate of the upleft vertex of the region """ self._xpos = value + x = property(fget=Region.get_x, fset=set_x) def set_y(self, value: int) -> None: @@ -84,6 +94,7 @@ def set_y(self, value: int) -> None: :param value: y coordinate of the upleft vertex of the region """ self._ypos = value + y = property(fget=Region.get_y, fset=set_y) def get_dx(self) -> int: @@ -93,6 +104,7 @@ def get_dx(self) -> int: :returns: x offset from the center of the match region """ return self._dx + dx = property(fget=get_dx) def get_dy(self) -> int: @@ -102,6 +114,7 @@ def get_dy(self) -> int: :returns: y offset from the center of the match region """ return self._dy + dy = property(fget=get_dy) def get_similarity(self) -> float: @@ -111,6 +124,7 @@ def get_similarity(self) -> float: :returns: similarity the match was obtained with """ return self._similarity + similarity = property(fget=get_similarity) def get_target(self) -> Location: @@ -119,13 +133,19 @@ def get_target(self) -> Location: :returns: target location to click on if clicking on the match """ - return self.calc_click_point(self._xpos, self._ypos, - self._width, self._height, - Location(self._dx, self._dy)) + return self.calc_click_point( + self._xpos, + self._ypos, + self._width, + self._height, + Location(self._dx, self._dy), + ) + target = property(fget=get_target) - def calc_click_point(self, xpos: int, ypos: int, width: int, height: int, - offset: Location) -> Location: + def calc_click_point( + self, xpos: int, ypos: int, width: int, height: int, offset: Location + ) -> Location: """ Calculate target location to click on if clicking on the match. @@ -136,8 +156,9 @@ def calc_click_point(self, xpos: int, ypos: int, width: int, height: int, :param offset: offset from the match region center for the final target :returns: target location to click on if clicking on the match """ - center_region = Region(0, 0, width, height, - dc=self.dc_backend, cv=self.cv_backend) + center_region = Region( + 0, 0, width, height, dc=self.dc_backend, cv=self.cv_backend + ) click_center = center_region.center target_xpos = xpos + click_center.x + offset.x diff --git a/guibot/path.py b/guibot/path.py index 4e398d6a..c865a2d7 100644 --- a/guibot/path.py +++ b/guibot/path.py @@ -32,8 +32,9 @@ from .fileresolver import FileResolver as Path -logging.getLogger("guibot.path")\ - .warn("The `path` module is deprecated, use `fileresolver` instead.") +logging.getLogger("guibot.path").warn( + "The `path` module is deprecated, use `fileresolver` instead." +) __all__ = ["Path"] diff --git a/guibot/region.py b/guibot/region.py index 44b6f323..da5d0a60 100644 --- a/guibot/region.py +++ b/guibot/region.py @@ -43,7 +43,8 @@ from .controller import * import logging -log = logging.getLogger('guibot.region') + +log = logging.getLogger("guibot.region") class Region(object): @@ -52,8 +53,15 @@ class Region(object): validation of expected images, and mouse and keyboard control. """ - def __init__(self, xpos: int = 0, ypos: int = 0, width: int = 0, height: int = 0, - dc: Controller = None, cv: "Finder" = None) -> None: + def __init__( + self, + xpos: int = 0, + ypos: int = 0, + width: int = 0, + height: int = 0, + dc: Controller = None, + cv: "Finder" = None, + ) -> None: """ Build a region object from upleft to downright vertex coordinates. 
@@ -123,17 +131,17 @@ def __init__(self, xpos: int = 0, ypos: int = 0, width: int = 0, height: int = 0 mouse_map = self.dc_backend.mousemap for mouse_button in dir(mouse_map): - if mouse_button.endswith('_BUTTON'): + if mouse_button.endswith("_BUTTON"): setattr(self, mouse_button, getattr(mouse_map, mouse_button)) key_map = self.dc_backend.keymap for key in dir(key_map): - if not key.startswith('__') and key != "to_string": + if not key.startswith("__") and key != "to_string": setattr(self, key, getattr(key_map, key)) mod_map = self.dc_backend.modmap for modifier_key in dir(mod_map): - if modifier_key.startswith('MOD_'): + if modifier_key.startswith("MOD_"): setattr(self, modifier_key, getattr(mod_map, modifier_key)) def _ensure_screen_clipping(self) -> None: @@ -165,6 +173,7 @@ def get_x(self) -> int: :returns: x coordinate of the upleft vertex of the region """ return self._xpos + x = property(fget=get_x) def get_y(self) -> int: @@ -174,6 +183,7 @@ def get_y(self) -> int: :returns: y coordinate of the upleft vertex of the region """ return self._ypos + y = property(fget=get_y) def get_width(self) -> int: @@ -183,6 +193,7 @@ def get_width(self) -> int: :returns: width of the region (xpos+width for downright vertex x) """ return self._width + width = property(fget=get_width) def get_height(self) -> int: @@ -192,6 +203,7 @@ def get_height(self) -> int: :returns: height of the region (ypos+height for downright vertex y) """ return self._height + height = property(fget=get_height) def get_center(self) -> Location: @@ -204,6 +216,7 @@ def get_center(self) -> Location: ypos = self._ypos + int(self._height / 2) return Location(xpos, ypos) + center = property(fget=get_center) def get_top_left(self) -> Location: @@ -213,6 +226,7 @@ def get_top_left(self) -> Location: :returns: upleft vertex of the region """ return Location(self._xpos, self._ypos) + top_left = property(fget=get_top_left) def get_top_right(self) -> Location: @@ -222,6 +236,7 @@ def get_top_right(self) -> Location: :returns: upright vertex of the region """ return Location(self._xpos + self._width, self._ypos) + top_right = property(fget=get_top_right) def get_bottom_left(self) -> Location: @@ -231,6 +246,7 @@ def get_bottom_left(self) -> Location: :returns: downleft vertex of the region """ return Location(self._xpos, self._ypos + self._height) + bottom_left = property(fget=get_bottom_left) def get_bottom_right(self) -> Location: @@ -240,6 +256,7 @@ def get_bottom_right(self) -> Location: :returns: downright vertex of the region """ return Location(self._xpos + self._width, self._ypos + self._height) + bottom_right = property(fget=get_bottom_right) def is_empty(self) -> bool: @@ -249,6 +266,7 @@ def is_empty(self) -> bool: :returns: whether the region is empty, i.e. 
has zero size """ return self._width == 0 and self._height == 0 + is_empty = property(fget=is_empty) def get_last_match(self) -> "Match": @@ -258,6 +276,7 @@ def get_last_match(self) -> "Match": :returns: last match obtained from finding a target within the region """ return self._last_match + last_match = property(fget=get_last_match) def get_mouse_location(self) -> Location: @@ -267,6 +286,7 @@ def get_mouse_location(self) -> Location: :returns: mouse location """ return self.dc_backend.mouse_location + mouse_location = property(fget=get_mouse_location) """Main region methods""" @@ -292,8 +312,9 @@ def nearby(self, rrange: int = 50) -> "Region": new_height = self._height + rrange + self._ypos - new_ypos # Final clipping is done in the Region constructor - return Region(new_xpos, new_ypos, new_width, new_height, - self.dc_backend, self.cv_backend) + return Region( + new_xpos, new_ypos, new_width, new_height, self.dc_backend, self.cv_backend + ) def above(self, rrange: int = 0) -> "Region": """ @@ -315,8 +336,14 @@ def above(self, rrange: int = 0) -> "Region": new_height = self._height + self._ypos - new_ypos # Final clipping is done in the Region constructor - return Region(self._xpos, new_ypos, self._width, new_height, - self.dc_backend, self.cv_backend) + return Region( + self._xpos, + new_ypos, + self._width, + new_height, + self.dc_backend, + self.cv_backend, + ) def below(self, rrange: int = 0) -> "Region": """ @@ -333,8 +360,14 @@ def below(self, rrange: int = 0) -> "Region": new_height = self._height + rrange # Final clipping is done in the Region constructor - return Region(self._xpos, self._ypos, self._width, new_height, - self.dc_backend, self.cv_backend) + return Region( + self._xpos, + self._ypos, + self._width, + new_height, + self.dc_backend, + self.cv_backend, + ) def left(self, rrange: int = 0) -> "Region": """ @@ -356,8 +389,14 @@ def left(self, rrange: int = 0) -> "Region": new_width = self._width + self._xpos - new_xpos # Final clipping is done in the Region constructor - return Region(new_xpos, self._ypos, new_width, self._height, - self.dc_backend, self.cv_backend) + return Region( + new_xpos, + self._ypos, + new_width, + self._height, + self.dc_backend, + self.cv_backend, + ) def right(self, rrange: int = 0) -> "Region": """ @@ -374,8 +413,14 @@ def right(self, rrange: int = 0) -> "Region": new_width = self._width + rrange # Final clipping is done in the Region constructor - return Region(self._xpos, self._ypos, new_width, self._height, - self.dc_backend, self.cv_backend) + return Region( + self._xpos, + self._ypos, + new_width, + self._height, + self.dc_backend, + self.cv_backend, + ) """Image expect methods""" @@ -393,8 +438,9 @@ def find(self, target: str | Target, timeout: int = 10) -> "Match": matches = self.find_all(target, timeout=timeout, allow_zero=False) return matches[0] - def find_all(self, target: str | Target, timeout: int = 10, - allow_zero: bool = False) -> "list[Match]": + def find_all( + self, target: str | Target, timeout: int = 10, allow_zero: bool = False + ) -> "list[Match]": """ Find multiples of a target on the screen. 
@@ -423,13 +469,25 @@ def find_all(self, target: str | Target, timeout: int = 10, relative_matches = cv_backend.find(target, screen_capture) if len(relative_matches) > 0: from .match import Match + for i, match in enumerate(relative_matches): absolute_x, absolute_y = match.x + self.x, match.y + self.y - new_match = Match(absolute_x, absolute_y, - match.width, match.height, match.dx, match.dy, - match.similarity, dc=dc_backend, cv=cv_backend) + new_match = Match( + absolute_x, + absolute_y, + match.width, + match.height, + match.dx, + match.dy, + match.similarity, + dc=dc_backend, + cv=cv_backend, + ) if len(last_matches) > i: - if last_matches[i].x == absolute_x and last_matches[i].y == absolute_y: + if ( + last_matches[i].x == absolute_x + and last_matches[i].y == absolute_y + ): moving_targets = False last_matches[i] = new_match else: @@ -448,8 +506,12 @@ def find_all(self, target: str | Target, timeout: int = 10, if not os.path.exists(ImageLogger.logging_destination): os.mkdir(ImageLogger.logging_destination) dump_path = GlobalConfig.image_logging_destination - hdump_path = os.path.join(dump_path, "last_finderror_haystack.png") - ndump_path = os.path.join(dump_path, "last_finderror_needle.png") + hdump_path = os.path.join( + dump_path, "last_finderror_haystack.png" + ) + ndump_path = os.path.join( + dump_path, "last_finderror_needle.png" + ) screen_capture.save(hdump_path) target.save(ndump_path) raise FindError(target) @@ -479,11 +541,15 @@ def _determine_cv_backend(self, target: Target) -> "Match": return target.match_settings if isinstance(target, Text) and not isinstance(self.cv_backend, TextFinder): raise IncompatibleTargetError("Need text matcher for matching text") - if isinstance(target, Pattern) and not (isinstance(self.cv_backend, CascadeFinder) - or isinstance(self.cv_backend, DeepFinder)): + if isinstance(target, Pattern) and not ( + isinstance(self.cv_backend, CascadeFinder) + or isinstance(self.cv_backend, DeepFinder) + ): raise IncompatibleTargetError("Need pattern matcher for matching patterns") if isinstance(target, Chain) and not isinstance(self.cv_backend, HybridFinder): - raise IncompatibleTargetError("Need hybrid matcher for matching chain targets") + raise IncompatibleTargetError( + "Need hybrid matcher for matching chain targets" + ) target.match_settings = self.cv_backend return self.cv_backend @@ -581,7 +647,9 @@ def idle(self, timeout: int) -> "Region": """Mouse methods""" - def hover(self, target_or_location: "Match | Location | str | Target") -> "Match | None": + def hover( + self, target_or_location: "Match | Location | str | Target" + ) -> "Match | None": """ Hover the mouse over a target or location. @@ -593,6 +661,7 @@ def hover(self, target_or_location: "Match | Location | str | Target") -> "Match # Handle Match from .match import Match + if isinstance(target_or_location, Match): self.dc_backend.mouse_move(target_or_location.target, smooth) return None @@ -608,8 +677,11 @@ def hover(self, target_or_location: "Match | Location | str | Target") -> "Match return match - def click(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match | None": + def click( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match | None": """ Click on a target or location using the left mouse button and optionally holding special keys. 
@@ -630,8 +702,11 @@ def click(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_click(self.LEFT_BUTTON, 1, modifiers) return match - def right_click(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match | None": + def right_click( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match | None": """ Click on a target or location using the right mouse button and optionally holding special keys. @@ -645,8 +720,11 @@ def right_click(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_click(self.RIGHT_BUTTON, 1, modifiers) return match - def middle_click(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match | None": + def middle_click( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match | None": """ Click on a target or location using the middle mouse button and optionally holding special keys. @@ -660,8 +738,11 @@ def middle_click(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_click(self.CENTER_BUTTON, 1, modifiers) return match - def double_click(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match | None": + def double_click( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match | None": """ Double click on a target or location using the left mouse button and optionally holding special keys. @@ -675,8 +756,12 @@ def double_click(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_click(self.LEFT_BUTTON, 2, modifiers) return match - def multi_click(self, target_or_location: "Match | Location | str | Target", - count: int = 3, modifiers: list[str] = None) -> "Match | None": + def multi_click( + self, + target_or_location: "Match | Location | str | Target", + count: int = 3, + modifiers: list[str] = None, + ) -> "Match | None": """ Click N times on a target or location using the left mouse button and optionally holding special keys. @@ -690,9 +775,14 @@ def multi_click(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_click(self.LEFT_BUTTON, count, modifiers) return match - def click_expect(self, click_image_or_location: Image | Location, - expect_target: str | Target, modifiers: list[str] = None, - timeout: int = 60, retries: int = 3) -> "Match | Region": + def click_expect( + self, + click_image_or_location: Image | Location, + expect_target: str | Target, + modifiers: list[str] = None, + timeout: int = 60, + retries: int = 3, + ) -> "Match | Region": """ Click on an image or location and wait for another one to appear. 
@@ -705,7 +795,7 @@ def click_expect(self, click_image_or_location: Image | Location, """ for i in range(retries): if i > 0: - log.info("Retrying the mouse click (%s of %s)", i+1, retries) + log.info("Retrying the mouse click (%s of %s)", i + 1, retries) self.click(click_image_or_location, modifiers=modifiers) try: return self.wait(expect_target, timeout) @@ -715,9 +805,14 @@ def click_expect(self, click_image_or_location: Image | Location, raise error return self - def click_vanish(self, click_image_or_location: Image | Location, - expect_target: str | Target, modifiers: list[str] = None, - timeout: int = 60, retries: int = 3) -> "Region": + def click_vanish( + self, + click_image_or_location: Image | Location, + expect_target: str | Target, + modifiers: list[str] = None, + timeout: int = 60, + retries: int = 3, + ) -> "Region": """ Click on an image or location and wait for another one to disappear. @@ -730,7 +825,7 @@ def click_vanish(self, click_image_or_location: Image | Location, """ for i in range(retries): if i > 0: - log.info("Retrying the mouse click (%s of %s)", i+1, retries) + log.info("Retrying the mouse click (%s of %s)", i + 1, retries) self.click(click_image_or_location, modifiers=modifiers) try: return self.wait_vanish(expect_target, timeout) @@ -740,18 +835,23 @@ def click_vanish(self, click_image_or_location: Image | Location, raise error return self - def click_at_index(self, anchor: str | Target, index: int = 0, - find_number: int = 3, timeout: int = 10) -> "Match": + def click_at_index( + self, + anchor: str | Target, + index: int = 0, + find_number: int = 3, + timeout: int = 10, + ) -> "Match": """ Find all instances of an anchor image and click on the one with the desired index given that they are horizontally then vertically sorted. :param anchor: image to find all matches of :param index: index of the match to click on (assuming >=1 matches), - sorted according to their (x,y) coordinates + sorted according to their (x,y) coordinates :param find_number: expected number of matches which is necessary - for fast failure in case some elements are not visualized and/or - proper matching result + for fast failure in case some elements are not visualized and/or + proper matching result :param timeout: timeout before which the number of matches should be found :returns: match from finding the target of the desired index @@ -784,13 +884,17 @@ def click_at_index(self, anchor: str | Target, index: int = 0, self.find(anchor) sorted_targets = sorted(targets, key=lambda x: (x.x, x.y)) - logging.debug("Totally %s clicking matches found: %s", len(sorted_targets), - ["(%s, %s)" % (x.x, x.y) for x in sorted_targets]) + logging.debug( + "Totally %s clicking matches found: %s", + len(sorted_targets), + ["(%s, %s)" % (x.x, x.y) for x in sorted_targets], + ) self.click(sorted_targets[index]) return sorted_targets[index] - def mouse_down(self, target_or_location: "Match | Location | str | Target", - button: int = None) -> "Match | None": + def mouse_down( + self, target_or_location: "Match | Location | str | Target", button: int = None + ) -> "Match | None": """ Hold down an arbitrary mouse button on a target or location. 
@@ -806,8 +910,9 @@ def mouse_down(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_down(button) return match - def mouse_up(self, target_or_location: "Match | Location | str | Target", - button: int = None) -> "Match | None": + def mouse_up( + self, target_or_location: "Match | Location | str | Target", button: int = None + ) -> "Match | None": """ Release an arbitrary mouse button on a target or location. @@ -823,8 +928,12 @@ def mouse_up(self, target_or_location: "Match | Location | str | Target", self.dc_backend.mouse_up(button) return match - def mouse_scroll(self, target_or_location: "Match | Location | str | Target", - clicks: int = 10, horizontal: bool = False) -> "Match | None": + def mouse_scroll( + self, + target_or_location: "Match | Location | str | Target", + clicks: int = 10, + horizontal: bool = False, + ) -> "Match | None": """ Scroll the mouse for a number of clicks. @@ -835,15 +944,21 @@ def mouse_scroll(self, target_or_location: "Match | Location | str | Target", :returns: match from finding the target or nothing if scrolling on a known location """ match = self.hover(target_or_location) - log.debug("Scrolling the mouse %s for %s clicks at %s", - "horizontally" if horizontal else "vertically", - clicks, target_or_location) + log.debug( + "Scrolling the mouse %s for %s clicks at %s", + "horizontally" if horizontal else "vertically", + clicks, + target_or_location, + ) self.dc_backend.mouse_scroll(clicks, horizontal) return match - def drag_drop(self, src_target_or_location: "Match | Location | str | Target", - dst_target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match | None": + def drag_drop( + self, + src_target_or_location: "Match | Location | str | Target", + dst_target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match | None": """ Drag from and drop at a target or location optionally holding special keys. @@ -857,8 +972,11 @@ def drag_drop(self, src_target_or_location: "Match | Location | str | Target", match = self.drop_at(dst_target_or_location, modifiers) return match - def drag_from(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match": + def drag_from( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match": """ Drag from a target or location optionally holding special keys. @@ -871,7 +989,7 @@ def drag_from(self, target_or_location: "Match | Location | str | Target", if modifiers is not None: log.info("Holding the modifiers %s", " ".join(modifiers)) self.dc_backend.keys_toggle(modifiers, True) - #self.dc_backend.keys_toggle(["Ctrl"], True) + # self.dc_backend.keys_toggle(["Ctrl"], True) log.info("Dragging %s", target_or_location) self.dc_backend.mouse_down(self.LEFT_BUTTON) @@ -879,8 +997,11 @@ def drag_from(self, target_or_location: "Match | Location | str | Target", return match - def drop_at(self, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match": + def drop_at( + self, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match": """ Drop at a target or location optionally holding special keys. 
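A hypothetical drag-and-drop sketch, again assuming an existing Region named `region` and placeholder targets; drag_drop combines a drag at the source with the drop_at call visible above, and mouse_scroll hovers its target before scrolling:

    region.drag_drop("slider-handle.png", "drop-zone.png")
    # the two-step form is available as separate calls
    region.drag_from("slider-handle.png")
    region.drop_at("drop-zone.png")
    # scroll 5 clicks vertically over a target (horizontal=True would scroll sideways)
    region.mouse_scroll("long-list.png", clicks=5)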
@@ -921,8 +1042,11 @@ def press_keys(self, keys: str | list[str]) -> "Region": self.dc_backend.keys_press(keys_list) return self - def press_at(self, keys: str | list[str], - target_or_location: "Match | Location | str | Target") -> "Match": + def press_at( + self, + keys: str | list[str], + target_or_location: "Match | Location | str | Target", + ) -> "Match": """ Press a single key or a list of keys simultaneously at a specified target or location. @@ -936,8 +1060,11 @@ def press_at(self, keys: str | list[str], self.dc_backend.keys_press(keys_list) return match - def _parse_keys(self, keys: str | list[str], - target_or_location: "Match | Location | str | Target" = None) -> list[str]: + def _parse_keys( + self, + keys: str | list[str], + target_or_location: "Match | Location | str | Target" = None, + ) -> list[str]: at_str = " at %s" % target_or_location if target_or_location else "" keys_list = [] @@ -953,14 +1080,18 @@ def _parse_keys(self, keys: str | list[str], raise # a key cannot be a string (text) key_strings.append(key) keys_list.append(key) - log.info("Pressing together keys '%s'%s", - "'+'".join(keystr for keystr in key_strings), - at_str) + log.info( + "Pressing together keys '%s'%s", + "'+'".join(keystr for keystr in key_strings), + at_str, + ) else: # if not a list (i.e. if a single key) key = keys try: - log.info("Pressing key '%s'%s", self.dc_backend.keymap.to_string(key), at_str) + log.info( + "Pressing key '%s'%s", self.dc_backend.keymap.to_string(key), at_str + ) # if not a special key (i.e. if a character key) except KeyError: if isinstance(key, int): @@ -971,8 +1102,13 @@ def _parse_keys(self, keys: str | list[str], keys_list.append(key) return keys_list - def press_expect(self, keys: list[str] | str, expect_target: str | Target, - timeout: int = 60, retries: int = 3) -> "Match": + def press_expect( + self, + keys: list[str] | str, + expect_target: str | Target, + timeout: int = 60, + retries: int = 3, + ) -> "Match": """ Press a key and wait for a target to appear. @@ -985,7 +1121,7 @@ def press_expect(self, keys: list[str] | str, expect_target: str | Target, """ for i in range(retries): if i > 0: - log.info("Retrying the key press (%s of %s)", i+1, retries) + log.info("Retrying the key press (%s of %s)", i + 1, retries) self.press_keys(keys) try: return self.wait(expect_target, timeout) @@ -993,8 +1129,13 @@ def press_expect(self, keys: list[str] | str, expect_target: str | Target, if i == retries - 1: raise error - def press_vanish(self, keys: list[str] | str, expect_target: str | Target, - timeout: int = 60, retries: int = 3) -> "Region": + def press_vanish( + self, + keys: list[str] | str, + expect_target: str | Target, + timeout: int = 60, + retries: int = 3, + ) -> "Region": """ Press a key and wait for a target to disappear. 
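The press_expect and press_vanish hunks above follow the same retry shape as click_expect: act, wait for the expected state, and only let the final failure escape. A minimal standalone mirror of that control flow, with illustrative names and a plain Exception standing in for guibot's FindError:

    def act_and_wait(action, wait_for, retries=3):
        for i in range(retries):
            if i > 0:
                print("Retrying (%s of %s)" % (i + 1, retries))
            action()
            try:
                return wait_for()
            except Exception as error:
                if i == retries - 1:
                    raise error

    result = act_and_wait(lambda: print("pressing ENTER"), lambda: "expected target found")
    print(result)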
@@ -1007,7 +1148,7 @@ def press_vanish(self, keys: list[str] | str, expect_target: str | Target, """ for i in range(retries): if i > 0: - log.info("Retrying the key press (%s of %s)", i+1, retries) + log.info("Retrying the key press (%s of %s)", i + 1, retries) self.press_keys(keys) try: return self.wait_vanish(expect_target, timeout) @@ -1044,8 +1185,12 @@ def type_text(self, text: list[str] | str, modifiers: list[str] = None) -> "Regi self.dc_backend.keys_type(text_list, modifiers) return self - def type_at(self, text: list[str] | str, target_or_location: "Match | Location | str | Target", - modifiers: list[str] = None) -> "Match": + def type_at( + self, + text: list[str] | str, + target_or_location: "Match | Location | str | Target", + modifiers: list[str] = None, + ) -> "Match": """ Type a list of consecutive character keys (without special keys) at a specified target or location. @@ -1065,8 +1210,11 @@ def type_at(self, text: list[str] | str, target_or_location: "Match | Location | self.dc_backend.keys_type(text_list, modifiers) return match - def _parse_text(self, text: list[str] | str, - target_or_location: "Match | Location | str | Target" = None) -> list[str]: + def _parse_text( + self, + text: list[str] | str, + target_or_location: "Match | Location | str | Target" = None, + ) -> list[str]: at_str = " at %s" % target_or_location if target_or_location else "" text_list = [] @@ -1086,8 +1234,14 @@ def _parse_text(self, text: list[str] | str, return text_list """Mixed (form) methods""" - def click_at(self, anchor: "Match | Location | Target | str", - dx: int, dy: int, count: int = 1) -> "Region": + + def click_at( + self, + anchor: "Match | Location | Target | str", + dx: int, + dy: int, + count: int = 1, + ) -> "Region": """ Clicks on a relative location using a displacement from an anchor. @@ -1099,6 +1253,7 @@ def click_at(self, anchor: "Match | Location | Target | str", :raises: :py:class:`exceptions.ValueError` if `count` is not acceptable value """ from .match import Match + if isinstance(anchor, Match): start_loc = anchor.target elif isinstance(anchor, Location): @@ -1111,9 +1266,16 @@ def click_at(self, anchor: "Match | Location | Target | str", return self - def fill_at(self, anchor: "Match | Location | Target | str", - text: str, dx: int, dy: int, del_flag: bool = True, - esc_flag: bool = True, mark_clicks: int = 1) -> "Region": + def fill_at( + self, + anchor: "Match | Location | Target | str", + text: str, + dx: int, + dy: int, + del_flag: bool = True, + esc_flag: bool = True, + mark_clicks: int = 1, + ) -> "Region": """ Fills a new text at a text box using a displacement from an anchor. @@ -1161,10 +1323,18 @@ def fill_at(self, anchor: "Match | Location | Target | str", return self - def select_at(self, anchor: "Match | Location | Target | str", - image_or_index: str | int, dx: int, dy: int, dw: int = 0, - dh: int = 0, ret_flag: bool = True, mark_clicks: int = 1, - tries: int = 3) -> "Region": + def select_at( + self, + anchor: "Match | Location | Target | str", + image_or_index: str | int, + dx: int, + dy: int, + dw: int = 0, + dh: int = 0, + ret_flag: bool = True, + mark_clicks: int = 1, + tries: int = 3, + ) -> "Region": """ Select an option at a dropdown list using either an integer index or an option image if the order cannot be easily inferred. @@ -1221,10 +1391,14 @@ def select_at(self, anchor: "Match | Location | Target | str", # list, therefore a total of 2 option heights spanning the haystack height. 
# The haystack y displacement relative to 'loc' is then 1/2*1/2*dh loc = self.get_mouse_location() - dropdown_haystack = Region(xpos=int(loc.x - dw / 2), - ypos=int(loc.y - dh / 4), - width=dw, height=dh, - dc=self.dc_backend, cv=self.cv_backend) + dropdown_haystack = Region( + xpos=int(loc.x - dw / 2), + ypos=int(loc.y - dh / 4), + width=dw, + height=dh, + dc=self.dc_backend, + cv=self.cv_backend, + ) try: dropdown_haystack.click(image_or_index) except FindError: @@ -1232,7 +1406,15 @@ def select_at(self, anchor: "Match | Location | Target | str", if tries == 1: raise logging.info("Opening the dropdown menu didn't work, retrying") - self.select_at(anchor, image_or_index, dx, dy, dw, dh, - mark_clicks=mark_clicks, tries=tries-1) + self.select_at( + anchor, + image_or_index, + dx, + dy, + dw, + dh, + mark_clicks=mark_clicks, + tries=tries - 1, + ) return self diff --git a/guibot/target.py b/guibot/target.py index a2381911..d89cad46 100644 --- a/guibot/target.py +++ b/guibot/target.py @@ -38,7 +38,7 @@ from .errors import * -__all__ = ['Target', 'Image', 'Text', 'Pattern', 'Chain'] +__all__ = ["Target", "Image", "Text", "Pattern", "Chain"] class Target(object): @@ -70,7 +70,9 @@ def from_data_file(filename: str) -> "Target": elif extension == ".steps": target = Chain(name) else: - raise IncompatibleTargetFileError("The target file %s is not among any of the known types" % filename) + raise IncompatibleTargetFileError( + "The target file %s is not among any of the known types" % filename + ) return target @@ -88,7 +90,13 @@ def from_match_file(filename: str) -> "Target": match_filename = os.path.splitext(filename)[0] + ".match" finder = Finder.from_match_file(match_filename) - if finder.params["find"]["backend"] in ("autopy", "contour", "template", "feature", "tempfeat"): + if finder.params["find"]["backend"] in ( + "autopy", + "contour", + "template", + "feature", + "tempfeat", + ): target = Image(filename, match_settings=finder) elif finder.params["find"]["backend"] == "text": target = Text(name, match_settings=finder) @@ -144,6 +152,7 @@ def get_similarity(self) -> float: :returns: similarity required for the image to be matched """ return self.match_settings.params["find"]["similarity"].value + similarity = property(fget=get_similarity) def get_center_offset(self) -> Location: @@ -156,6 +165,7 @@ def get_center_offset(self) -> Location: it is then taken when matching to produce a clicking target for a match. """ return self._center_offset + center_offset = property(fget=get_center_offset) def load(self, filename: str, **kwargs: dict[str, type]) -> None: @@ -234,8 +244,13 @@ class Image(Target): _cache = {} - def __init__(self, image_filename: str = "", pil_image: PIL.Image.Image = None, - match_settings: "Finder" = None, use_cache: bool = True) -> None: + def __init__( + self, + image_filename: str = "", + pil_image: PIL.Image.Image = None, + match_settings: "Finder" = None, + use_cache: bool = True, + ) -> None: """ Build an image object. 
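A small hypothetical sketch of the target constructors touched above; the file names are placeholders and only behaviour visible in this diff is assumed (.steps files resolve to a Chain, Image accepts either a filename or an in-memory PIL image and caching is enabled by default):

    from guibot.target import Target, Image

    chain = Target.from_data_file("login-flow.steps")    # dispatched by extension to Chain
    button = Image("ok-button.png")                      # decoded from disk and cached
    button_again = Image("ok-button.png")                # pixel data reused from the class-level cache
    uncached = Image("ok-button.png", use_cache=False)   # bypasses the cache entirely
    print(button.width, button.height)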
@@ -266,7 +281,11 @@ def __init__(self, image_filename: str = "", pil_image: PIL.Image.Image = None, def __str__(self) -> str: """Provide the image filename.""" - return "noname" if self._filename == "" else os.path.splitext(os.path.basename(self._filename))[0] + return ( + "noname" + if self._filename == "" + else os.path.splitext(os.path.basename(self._filename))[0] + ) def get_filename(self) -> str: """ @@ -275,6 +294,7 @@ def get_filename(self) -> str: :returns: filename of the image """ return self._filename + filename = property(fget=get_filename) def get_width(self) -> int: @@ -284,6 +304,7 @@ def get_width(self) -> int: :returns: width of the image """ return self._width + width = property(fget=get_width) def get_height(self) -> int: @@ -293,6 +314,7 @@ def get_height(self) -> int: :returns: height of the image """ return self._height + height = property(fget=get_height) def get_pil_image(self) -> PIL.Image.Image: @@ -302,9 +324,12 @@ def get_pil_image(self) -> PIL.Image.Image: :returns: image data of the image """ return self._pil_image + pil_image = property(fget=get_pil_image) - def load(self, filename: str, use_cache: bool = True, **kwargs: dict[str, type]) -> None: + def load( + self, filename: str, use_cache: bool = True, **kwargs: dict[str, type] + ) -> None: """ Load image from a file. @@ -320,7 +345,7 @@ def load(self, filename: str, use_cache: bool = True, **kwargs: dict[str, type]) self._pil_image = self._cache[filename] else: # load and cache image - self._pil_image = PIL.Image.open(filename).convert('RGB') + self._pil_image = PIL.Image.open(filename).convert("RGB") if use_cache: self._cache[filename] = self._pil_image self._filename = filename @@ -351,8 +376,12 @@ class Text(Target): using OCR or general text detection methods. """ - def __init__(self, value: str = None, text_filename: str = None, - match_settings: "Finder" = None) -> None: + def __init__( + self, + value: str = None, + text_filename: str = None, + match_settings: "Finder" = None, + ) -> None: """ Build a text object. 
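The load hunk above keeps decoded images in a class-level dictionary keyed by filename. A minimal standalone mirror of that pattern, with a module-level dictionary and a placeholder path standing in for Image._cache and a real file:

    import PIL.Image

    _cache = {}

    def load_rgb(filename, use_cache=True):
        # return the cached pixel data when allowed, otherwise decode and optionally cache
        if use_cache and filename in _cache:
            return _cache[filename]
        pil_image = PIL.Image.open(filename).convert("RGB")
        if use_cache:
            _cache[filename] = pil_image
        return pil_image

    # first call decodes from disk, the second is served from the cache
    # img = load_rgb("screenshot.png")
    # assert load_rgb("screenshot.png") is img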
@@ -374,7 +403,7 @@ def __init__(self, value: str = None, text_filename: str = None, def __str__(self) -> str: """Provide a part of the text value.""" - return self.value[:30].replace('/', '').replace('\\', '') + return self.value[:30].replace("/", "").replace("\\", "") def load(self, filename: str, **kwargs: dict[str, type]) -> None: """ @@ -408,17 +437,18 @@ def distance_to(self, str2: str) -> float: """ str1 = str(self.value) import numpy + M = numpy.empty((len(str1) + 1, len(str2) + 1), int) - for a in range(0, len(str1)+1): + for a in range(0, len(str1) + 1): M[a, 0] = a - for b in range(0, len(str2)+1): + for b in range(0, len(str2) + 1): M[0, b] = b - for a in range(1, len(str1)+1): # (size_t a = 1; a <= NA; ++a): - for b in range(1, len(str2)+1): # (size_t b = 1; b <= NB; ++b) - z = M[a-1, b-1] + (0 if str1[a-1] == str2[b-1] else 1) - M[a, b] = min(min(M[a-1, b] + 1, M[a, b-1] + 1), z) + for a in range(1, len(str1) + 1): # (size_t a = 1; a <= NA; ++a): + for b in range(1, len(str2) + 1): # (size_t b = 1; b <= NB; ++b) + z = M[a - 1, b - 1] + (0 if str1[a - 1] == str2[b - 1] else 1) + M[a, b] = min(min(M[a - 1, b] + 1, M[a, b - 1] + 1), z) return M[len(str1), len(str2)] @@ -522,6 +552,7 @@ def load(self, steps_filename: str, **kwargs: dict[str, type]) -> None: :raises: :py:class:`errors.UnsupportedBackendError` if a chain step is of unknown type :raises: :py:class:`IOError` if an chain step line cannot be parsed """ + def resolve_stepsfile(filename: str) -> str: """ Try to find a valid steps file from a given file name. @@ -546,7 +577,7 @@ def resolve_stepsfile(filename: str) -> str: while lines: step = lines.pop(0) - dataconfig = re.split(r'\t+', step.rstrip('\t\n')) + dataconfig = re.split(r"\t+", step.rstrip("\t\n")) # read a nested steps file and append to this chain if dataconfig[0].endswith(".steps"): @@ -572,12 +603,18 @@ def resolve_stepsfile(filename: str) -> str: data_and_config = Pattern(data, match_settings=self.match_settings) elif step_backend == "text": if data.endswith(".txt"): - data_and_config = Text(text_filename=data, match_settings=self.match_settings) + data_and_config = Text( + text_filename=data, match_settings=self.match_settings + ) else: - data_and_config = Text(value=data, match_settings=self.match_settings) + data_and_config = Text( + value=data, match_settings=self.match_settings + ) else: # in particular, we cannot have a chain within the chain since it is not useful - raise UnsupportedBackendError("No target step type for '%s' backend" % step_backend) + raise UnsupportedBackendError( + "No target step type for '%s' backend" % step_backend + ) self._steps.append(data_and_config) @@ -618,7 +655,9 @@ def save(self, steps_filename: str) -> None: data = data_and_config.filename else: # in particular, we cannot have a chain within the chain since it is not useful - raise UnsupportedBackendError("No target step type for '%s' backend" % step_backend) + raise UnsupportedBackendError( + "No target step type for '%s' backend" % step_backend + ) data_and_config.save(data) save_lines.append(data + "\t" + os.path.splitext(data)[0] + ".match\n")
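distance_to above implements the classic Levenshtein edit distance with a numpy matrix. A pure-Python rendering of the same recurrence, only to make the three-way minimum explicit:

    def levenshtein(str1, str2):
        M = [[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]
        for a in range(len(str1) + 1):
            M[a][0] = a
        for b in range(len(str2) + 1):
            M[0][b] = b
        for a in range(1, len(str1) + 1):
            for b in range(1, len(str2) + 1):
                substitution = M[a - 1][b - 1] + (0 if str1[a - 1] == str2[b - 1] else 1)
                M[a][b] = min(M[a - 1][b] + 1, M[a][b - 1] + 1, substitution)
        return M[len(str1)][len(str2)]

    print(levenshtein("kitten", "sitting"))  # 3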