From 636ed1d8ac1db61bbb6325fe7dfec4aa131a3b0e Mon Sep 17 00:00:00 2001 From: Jacob Callahan Date: Mon, 15 May 2023 17:08:40 -0400 Subject: [PATCH] Add initial support for a Beaker provider. This change adds a Beaker provider and its bind. For now, this relies on the beaker-client cli and config through that. Currently the only supported checkout/execute method is via job xml or an existing job id. Also, I removed the custom FileLock class since we have moved away from multiprocessing. Instead, we will just use threading locks. --- README.md | 2 + broker/binds/beaker.py | 224 ++++++++++++++++++ broker/broker.py | 15 +- broker/commands.py | 124 ++++------ broker/exceptions.py | 8 + broker/helpers.py | 99 ++++---- broker/hosts.py | 9 +- broker/providers/__init__.py | 59 ++++- broker/providers/ansible_tower.py | 41 ++-- broker/providers/beaker.py | 156 ++++++++++++ broker/providers/container.py | 19 +- broker/providers/test_provider.py | 4 +- setup.cfg | 1 + tests/data/beaker/job_result.json | 68 ++++++ tests/data/beaker/test_job.xml | 26 ++ .../beaker/checkout_test_job-2.yaml | 2 + .../beaker/checkout_test_job.yaml | 1 + tests/functional/README.md | 7 + tests/functional/test_rh_beaker.py | 95 ++++++++ tests/providers/test_beaker.py | 71 ++++++ tests/test_broker.py | 4 +- tests/test_helpers.py | 38 +-- 22 files changed, 876 insertions(+), 197 deletions(-) create mode 100644 broker/binds/beaker.py create mode 100644 broker/providers/beaker.py create mode 100644 tests/data/beaker/job_result.json create mode 100644 tests/data/beaker/test_job.xml create mode 100644 tests/data/cli_scenarios/beaker/checkout_test_job-2.yaml create mode 100644 tests/data/cli_scenarios/beaker/checkout_test_job.yaml create mode 100644 tests/functional/test_rh_beaker.py create mode 100644 tests/providers/test_beaker.py diff --git a/README.md b/README.md index fe88f3b5..ae65c72d 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,8 @@ Copy the example settings file to `broker_settings.yaml` and
edit it. (optional) If you are using the Container provider, install the extra dependency based on your container runtime of choice with either `pip install broker[podman]` or `pip install broker[docker]`. +(optional) If you are using the Beaker provider, install the extra dependency with `dnf install krb5-devel` and then `pip install broker[beaker]`. + To run Broker outside of its base directory, specify the directory with the `BROKER_DIRECTORY` environment variable. Configure the `broker_settings.yaml` file to set configuration values for broker's interaction with its providers. diff --git a/broker/binds/beaker.py b/broker/binds/beaker.py new file mode 100644 index 00000000..d7f20bee --- /dev/null +++ b/broker/binds/beaker.py @@ -0,0 +1,224 @@ +import json +import subprocess +import time +from logzero import logger +from pathlib import Path +from xml.etree import ElementTree as ET +from broker import helpers +from broker.exceptions import BeakerBindError + + +def _elementree_to_dict(etree): + """Converts an ElementTree object to a dictionary""" + data = {} + if etree.attrib: + data.update(etree.attrib) + if etree.text: + data["text"] = etree.text + for child in etree: + child_data = _elementree_to_dict(child) + if (tag := child.tag) in data: + if not isinstance(data[tag], list): + data[tag] = [data[tag]] + data[tag].append(child_data) + else: + data[tag] = child_data + return data + + +def _curate_job_info(job_info_dict): + curated_info = { + "job_id": "id", + # "reservation_id": "current_reservation/recipe_id", + "whiteboard": "whiteboard/text", + "hostname": "recipeSet/recipe/system", + "distro": "recipeSet/recipe/distro" + } + return helpers.dict_from_paths(job_info_dict, curated_info) + + +class BeakerBind: + def __init__(self, hub_url, auth="krbv", **kwargs): + self.hub_url = hub_url + self._base_args = ["--insecure", f"--hub={self.hub_url}"] + if auth == "basic": + # If we're not using system kerberos auth, add in explicit basic auth + self.username = 
kwargs.pop("username", None) + self.password = kwargs.pop("password", None) + self._base_args.extend( + [ + f"--username {self.username}", + f"--password {self.password}", + ] + ) + self.__dict__.update(kwargs) + + def _exec_command(self, *cmd_args, **cmd_kwargs): + raise_on_error = cmd_kwargs.pop("raise_on_error", True) + exec_cmd, cmd_args = ["bkr"], list(cmd_args) + # check through kwargs and if any are True add to cmd_args + del_keys = [] + for k, v in cmd_kwargs.items(): + if isinstance(v, bool) or v is None: + del_keys.append(k) + if v is True: + cmd_args.append(f"--{k}" if not k.startswith("--") else k) + for k in del_keys: + del cmd_kwargs[k] + exec_cmd.extend(cmd_args) + exec_cmd.extend(self._base_args) + exec_cmd.extend([f"--{k.replace('_', '-')}={v}" for k, v in cmd_kwargs.items()]) + logger.debug(f"Executing beaker command: {exec_cmd}") + proc = subprocess.Popen( + exec_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = proc.communicate() + result = helpers.Result( + stdout=stdout.decode(), + stderr=stderr.decode(), + status=proc.returncode, + ) + if result.status != 0 and raise_on_error: + raise BeakerBindError( + f"Beaker command failed:\n" + f"Command={' '.join(exec_cmd)}\n" + f"Result={result}", + ) + logger.debug(f"Beaker command result: {result.stdout}") + return result + + def job_submit(self, job_xml, wait=False): + # wait behavior seems buggy to me, so best to avoid it + if not Path(job_xml).exists(): + raise FileNotFoundError(f"Job XML file {job_xml} not found") + result = self._exec_command("job-submit", job_xml, wait=wait) + if not wait: + # get the job id from the output + # format is "Submitted: ['J:7849837'] where the number is the job id + for line in result.stdout.splitlines(): + if line.startswith("Submitted:"): + return line.split("'")[1].replace("J:", "") + + def job_watch(self, job_id): + job_id = f"J:{job_id}" if not job_id.startswith("J:") else job_id + return self._exec_command("job-watch", job_id) + 
+ def job_results(self, job_id, format="beaker-results-xml", pretty=False): + job_id = f"J:{job_id}" if not job_id.startswith("J:") else job_id + return self._exec_command( + "job-results", job_id, format=format, prettyxml=pretty + ) + + def job_clone(self, job_id, wait=False, **kwargs): + job_id = f"J:{job_id}" if not job_id.startswith("J:") else job_id + return self._exec_command("job-clone", job_id, wait=wait, **kwargs) + + def job_list(self, *args, **kwargs): + return self._exec_command("job-list", *args, **kwargs) + + def job_cancel(self, job_id): + if not job_id.startswith("J:") and not job_id.startswith("RS:"): + job_id = f"J:{job_id}" + return self._exec_command("job-cancel", job_id) + + def job_delete(self, job_id): + job_id = f"J:{job_id}" if not job_id.startswith("J:") else job_id + return self._exec_command("job-delete", job_id) + + def system_release(self, system_id): + return self._exec_command("system-release", system_id) + + def system_list(self, **kwargs): + """Due to the number of arguments, we will not validate before submitting + + Accepted arguments are: + available available to be used by this user + free available to this user and not currently being used + removed which have been removed + mine owned by this user + type=TYPE of TYPE + status=STATUS with STATUS + pool=POOL in POOL + arch=ARCH with ARCH + dev-vendor-id=VENDOR-ID with a device that has VENDOR-ID + dev-device-id=DEVICE-ID with a device that has DEVICE-ID + dev-sub-vendor-id=SUBVENDOR-ID with a device that has SUBVENDOR-ID + dev-sub-device-id=SUBDEVICE-ID with a device that has SUBDEVICE-ID + dev-driver=DRIVER with a device that has DRIVER + dev-description=DESCRIPTION with a device that has DESCRIPTION + xml-filter=XML matching the given XML filter + host-filter=NAME matching pre-defined host filter + """ + # convert the flags passed in kwargs to arguments + args = [] + for key in {"available", "free", "removed", "mine"}: + if kwargs.pop(key, False): + args.append(f"--{key}") + 
return self._exec_command("system-list", *args, **kwargs) + + def user_systems(self): # to be used for inventory sync + result = self.system_list(mine=True, raise_on_error=False) + if result.status != 0: + return [] + else: + return result.stdout.splitlines() + + def system_details(self, system_id, format="json"): + return self._exec_command("system-details", system_id, format=format) + + def execute_job(self, job, max_wait="24h"): + """Submit a job, periodically checking the status until it completes + then return a dictionary of the results. + """ + if Path(job).exists(): # job xml path passed in + job_id = self.job_submit(job, wait=False) + else: # using a job id + job_id = self.job_clone(job) + logger.info(f"Submitted job: {job_id}") + _max_wait = time.time() + helpers.translate_timeout(max_wait or "24h") + while time.time() < _max_wait: + time.sleep(60) + result = self.job_results(job_id, pretty=True) + if 'result="Pass"' in result.stdout: + return _curate_job_info(_elementree_to_dict(ET.fromstring(result.stdout))) + elif 'result="Fail"' in result.stdout or "Exception: " in result.stdout: + raise BeakerBindError(f"Job {job_id} failed:\n{result}") + elif 'result="Warn"' in result.stdout: + res_dict = _elementree_to_dict(ET.fromstring(result.stdout)) + raise BeakerBindError(f"Job {job_id} was resulted in a warning. 
Status: {res_dict['status']}") + raise BeakerBindError(f"Job {job_id} did not complete within {max_wait}") + + def system_details_curated(self, system_id): + full_details = json.loads(self.system_details(system_id).stdout) + curated_details = { + "hostname": full_details["fqdn"], + "mac_address": full_details["mac_address"], + "owner": "{display_name} <{email_address}>".format( + display_name=full_details["owner"]["display_name"], + email_address=full_details["owner"]["email_address"], + ), + "id": full_details["id"], + } + if current_res := full_details.get("current_reservation"): + curated_details.update( + { + "reservation_id": current_res["recipe_id"], + "reserved_on": current_res.get("start_time"), + "expires_on": current_res.get("finish_time"), + "reserved_for": "{display_name} <{email_address}>".format( + display_name=current_res["user"]["display_name"], + email_address=current_res["user"]["email_address"], + ), + } + ) + return curated_details + + def jobid_from_system(self, system_hostname): + """Return the job id for the current reservation on the system""" + for job_id in json.loads(self.job_list(mine=True).stdout): + job_result = self.job_results(job_id, pretty=True) + job_detail = _curate_job_info(_elementree_to_dict(ET.fromstring(job_result.stdout))) + if job_detail["hostname"] == system_hostname: + return job_id diff --git a/broker/broker.py b/broker/broker.py index 2171f00a..d619fef5 100644 --- a/broker/broker.py +++ b/broker/broker.py @@ -152,7 +152,7 @@ def checkout(self): self._hosts.extend(hosts) helpers.update_inventory([host.to_dict() for host in hosts]) if err: - raise err + raise self.BrokerError(f"Error during checkout from {self}") from err return hosts if not len(hosts) == 1 else hosts[0] def execute(self, **kwargs): @@ -169,12 +169,11 @@ def execute(self, **kwargs): logger.info(f"Using provider {provider.__name__} for execution") return self._act(provider, method) - def nick_help(self): - """Use a provider's nick_help method to get 
argument information""" - if self._provider_actions: - provider, _ = PROVIDER_ACTIONS[[*self._provider_actions.keys()][0]] - logger.info(f"Querying provider {provider.__name__}") - self._act(provider, "nick_help", checkout=False) + def provider_help(self, provider_name): + """Use a provider's provider_help method to get argument information""" + provider = PROVIDERS[provider_name] + logger.info(f"Querying provider {provider.__name__}") + self._act(provider, "provider_help", checkout=False) def _checkin(self, host): logger.info(f"Checking in {host.hostname or host.name}") @@ -298,7 +297,7 @@ def sync_inventory(provider): prov_inventory = PROVIDERS[provider](**instance).get_inventory(additional_arg) curr_inventory = [ hostname if (hostname := host.get("hostname")) else host.get("name") - for host in helpers.load_inventory(filter=f"_broker_provider={provider}") + for host in helpers.load_inventory(filter=f'@inv._broker_provider == "{provider}"') ] helpers.update_inventory(add=prov_inventory, remove=curr_inventory) diff --git a/broker/commands.py b/broker/commands.py index e45fda7b..19ee7d49 100644 --- a/broker/commands.py +++ b/broker/commands.py @@ -4,9 +4,9 @@ import click from logzero import logger from broker import exceptions, helpers, settings -from broker.broker import PROVIDERS, PROVIDER_ACTIONS, Broker +from broker.broker import Broker from broker.logger import LOG_LEVEL -from broker import exceptions, helpers, settings +from broker.providers import PROVIDERS, PROVIDER_HELP signal.signal(signal.SIGINT, helpers.handle_keyboardinterrupt) @@ -39,17 +39,19 @@ class ExceptionHandler(click.Group): def __call__(self, *args, **kwargs): try: - return self.main(*args, **kwargs) + res = self.main(*args, **kwargs) + helpers.emit(return_code=0) + return res except Exception as err: if not isinstance(err, exceptions.BrokerError): err = exceptions.BrokerError(err) helpers.emit(return_code=err.error_code, error_message=str(err.message)) sys.exit(err.error_code) - 
helpers.emit(return_code=0) def provider_options(command): """Applies provider-specific decorators to each command this decorates""" + # import IPython; IPython.embed() for prov in PROVIDERS.values(): if prov.hidden: continue @@ -78,11 +80,18 @@ def populate_providers(click_group): group=click_group, name=prov, hidden=prov_class.hidden, - context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, + context_settings={ + "allow_extra_args": True, + "ignore_unknown_options": True, + }, ) @click.pass_context def provider_cmd(ctx, *args, **kwargs): # the actual subcommand """Get information about a provider's actions""" + # add additional args flags to the kwargs + for arg in ctx.args: + if arg.startswith("--"): + kwargs[arg[2:]] = True # if additional arguments were passed, include them in the broker args # strip leading -- characters kwargs.update( @@ -92,24 +101,21 @@ def provider_cmd(ctx, *args, **kwargs): # the actual subcommand } ) broker_inst = Broker(**kwargs) - broker_inst.nick_help() + broker_inst.provider_help(ctx.info_name) # iterate through available actions and populate options from them - for action in ( - action - for action, prov_info in PROVIDER_ACTIONS.items() - if prov_info[0] == prov_class - ): - action = action.replace("_", "-") - plural = ( - action.replace("y", "ies") if action.endswith("y") else f"{action}s" - ) - provider_cmd = click.option( - f"--{plural}", is_flag=True, help=f"Get available {plural}" - )(provider_cmd) - provider_cmd = click.option( - f"--{action}", type=str, help=f"Get information about a {action}" - )(provider_cmd) + for option, (p_cls, is_flag) in PROVIDER_HELP.items(): + if p_cls is not prov_class: + continue + option = option.replace("_", "-") + if is_flag: + provider_cmd = click.option( + f"--{option}", is_flag=True, help=f"Get available {option}" + )(provider_cmd) + else: + provider_cmd = click.option( + f"--{option}", type=str, help=f"Get information about a {option}" + )(provider_cmd) provider_cmd = 
click.option( "--results-limit", type=int, @@ -189,17 +195,10 @@ def cli(version): @click.pass_context def checkout(ctx, background, nick, count, args_file, **kwargs): """Checkout or "create" a Virtual Machine broker instance - COMMAND: broker checkout --workflow "workflow-name" --workflow-arg1 something - or - COMMAND: broker checkout --nick "nickname" - - :param ctx: clicks context object - - :param background: run a new broker subprocess to carry out command - :param nick: shortcut for arguments saved in settings.yaml, passed in as a string + COMMAND: broker checkout --workflow "workflow-name" --workflow_arg1 something - :param args_file: this broker argument will be replaced with the contents of the file passed in + COMMAND: broker checkout --nick "nickname" """ broker_args = helpers.clean_dict(kwargs) if nick: @@ -218,8 +217,7 @@ def checkout(ctx, background, nick, count, args_file, **kwargs): ) if background: helpers.fork_broker() - broker_inst = Broker(**broker_args) - broker_inst.checkout() + Broker(**broker_args).checkout() @cli.group(cls=ExceptionHandler) @@ -242,17 +240,7 @@ def providers(): def checkin(vm, background, all_, sequential, filter): """Checkin or "remove" a VM or series of VM broker instances - COMMAND: broker checkin ||all - - :param vm: Hostname or local id of host - - :param background: run a new broker subprocess to carry out command - - :param all_: Flag for whether to checkin everything - - :param sequential: Flag for whether to run checkins sequentially - - :param filter: a filter string matching broker's specification + COMMAND: broker checkin ||--all """ if background: helpers.fork_broker() @@ -266,8 +254,7 @@ def checkin(vm, background, all_, sequential, filter): or all_ ): to_remove.append(Broker().reconstruct_host(host)) - broker_inst = Broker(hosts=to_remove) - broker_inst.checkin(sequential=sequential) + Broker(hosts=to_remove).checkin(sequential=sequential) @loggedcli() @@ -294,7 +281,7 @@ def inventory(details, sync, filter): 
if (display_name := host.get("hostname")) is None: display_name = host.get("name") if details: - logger.info(f"{num}: {display_name}, Details: {helpers.yaml_format(host)}") + logger.info(f"{num}: {display_name}:\n{helpers.yaml_format(host)}") else: logger.info(f"{num}: {display_name}") helpers.emit({"inventory": emit_data}) @@ -312,17 +299,7 @@ def inventory(details, sync, filter): def extend(vm, background, all_, sequential, filter, **kwargs): """Extend a host's lease time - COMMAND: broker extend || - - :param vm: Hostname, VM Name, or local id of host - - :param background: run a new broker subprocess to carry out command - - :param all_: Click option all - - :param sequential: Flag for whether to run extends sequentially - - :param filter: a filter string matching broker's specification + COMMAND: broker extend |||--all """ broker_args = helpers.clean_dict(kwargs) if background: @@ -330,10 +307,9 @@ def extend(vm, background, all_, sequential, filter, **kwargs): inventory = helpers.load_inventory(filter=filter) to_extend = [] for num, host in enumerate(inventory): - if str(num) in vm or host["hostname"] in vm or host["name"] in vm or all_: + if str(num) in vm or host["hostname"] in vm or host.get("name") in vm or all_: to_extend.append(Broker().reconstruct_host(host)) - broker_inst = Broker(hosts=to_extend, **broker_args) - broker_inst.extend(sequential=sequential) + Broker(hosts=to_extend, **broker_args).extend(sequential=sequential) @loggedcli() @@ -352,20 +328,12 @@ def duplicate(vm, background, count, all_, filter): """Duplicate a broker-procured vm COMMAND: broker duplicate ||all - - :param vm: Hostname or local id of host - - :param background: run a new broker subprocess to carry out command - - :param all_: Click option all - - :param filter: a filter string matching broker's specification """ if background: helpers.fork_broker() inventory = helpers.load_inventory(filter=filter) for num, host in enumerate(inventory): - if str(num) in vm or 
host["hostname"] in vm or host["name"] in vm or all_: + if str(num) in vm or host["hostname"] in vm or host.get("name") in vm or all_: broker_args = host.get("_broker_args") if broker_args: if count: @@ -399,21 +367,10 @@ def duplicate(vm, background, count, all_, filter): @click.pass_context def execute(ctx, background, nick, output_format, artifacts, args_file, **kwargs): """Execute an arbitrary provider action - COMMAND: broker execute --workflow "workflow-name" --workflow-arg1 something - or - COMMAND: broker execute --nick "nickname" - - :param ctx: clicks context object - - :param background: run a new broker subprocess to carry out command - :param nick: shortcut for arguments saved in settings.yaml, passed in as a string + COMMAND: broker execute --workflow "workflow-name" --workflow_arg1 something - :param output_format: change the format of the output to one of the choice options - - :param artifacts: AnsibleTower provider specific option for choosing what to return - - :param args_file: this broker argument will be replaced with the contents of the file passed in + COMMAND: broker execute --nick "nickname" """ broker_args = helpers.clean_dict(kwargs) if nick: @@ -432,8 +389,7 @@ def execute(ctx, background, nick, output_format, artifacts, args_file, **kwargs ) if background: helpers.fork_broker() - broker_inst = Broker(**broker_args) - result = broker_inst.execute() + result = Broker(**broker_args).execute() helpers.emit({"output": result}) if output_format == "raw": print(result) diff --git a/broker/exceptions.py b/broker/exceptions.py index 399bbed8..368cc525 100644 --- a/broker/exceptions.py +++ b/broker/exceptions.py @@ -43,3 +43,11 @@ def __init__(self, host=None, message="Unspecified exception"): if host: self.message = f"{host.hostname or host.name}: {message}" super().__init__(message=self.message) + + +class ContainerBindError(BrokerError): + error_code = 11 + + +class BeakerBindError(BrokerError): + error_code = 12 diff --git 
a/broker/helpers.py b/broker/helpers.py index cc61cf84..0f8c1ad2 100644 --- a/broker/helpers.py +++ b/broker/helpers.py @@ -1,6 +1,7 @@ """Miscellaneous helpers live here""" +import threading +import click import collections -from contextlib import contextmanager import getpass import inspect import json @@ -8,19 +9,20 @@ import sys import tarfile import time +import yaml from collections import UserDict, namedtuple from collections.abc import MutableMapping +from contextlib import contextmanager from copy import deepcopy +from logzero import logger from pathlib import Path from uuid import uuid4 -import yaml -from logzero import logger - from broker import exceptions, settings from broker import logger as b_log FilterTest = namedtuple("FilterTest", "haystack needle test") +INVENTORY_LOCK = threading.Lock() def clean_dict(in_dict): @@ -89,6 +91,39 @@ def flatten_dict(nested_dict, parent_key="", separator="_"): return dict(flattened) +def dict_from_paths(source_dict, paths): + """Given a dictionary of desired keys and nested paths, return a new dictionary + + example: + source_dict = { + "key1": "value1", + "key2": { + "nested1": "value2", + "nested2": { + "deep": "value3" + } + } + } + paths = { + "key1": "key1", + "key2": "key2/nested2/deep" + } + returns { + "key1": "value1", + "key2": "value3" + } + """ + result = {} + for key, path in paths.items(): + if "/" not in path: + result[key] = source_dict.get(path) + else: + top, rem = path.split("/", 1) + result.update(dict_from_paths(source_dict[top], {key: rem})) + return result + + + def eval_filter(filter_list, raw_filter, filter_key="inv"): """Run each filter through an eval to get the results""" filter_list = [ @@ -200,19 +235,19 @@ def update_inventory(add=None, remove=None): add = [] if remove and not isinstance(remove, list): remove = [remove] - with FileLock(settings.inventory_path): + with INVENTORY_LOCK: inv_data = load_inventory() if inv_data: settings.inventory_path.unlink() if remove: for host in 
inv_data[::-1]: - if host["hostname"] in remove or host["name"] in remove: + if host["hostname"] in remove or host.get("name") in remove: # iterate through new hosts and update with old host data if it would nullify for new_host in add: if ( host["hostname"] == new_host["hostname"] - or host["name"] == new_host["name"] + or host.get("name") == new_host.get("name") ): # update missing data in the new_host with the old_host data new_host.update(merge_dicts(new_host, host)) @@ -246,6 +281,7 @@ class Emitter: helpers.emit(key=value, another=5) helpers.emit({"key": "value", "another": 5}) """ + EMIT_LOCK = threading.Lock() def __init__(self, emit_file=None): """Can empty init and set the file later""" @@ -271,7 +307,7 @@ def emit_to_file(self, *args, **kwargs): for key in kwargs.keys(): if getattr(kwargs[key], "json", None): kwargs[key] = kwargs[key].json - with FileLock(self.file): + with self.EMIT_LOCK: curr_data = json.loads(self.file.read_text() or "{}") curr_data.update(kwargs) self.file.write_text(json.dumps(curr_data, indent=4, sort_keys=True)) @@ -349,12 +385,14 @@ def fork_broker(): def handle_keyboardinterrupt(*args): - choice = input( + choice = click.prompt( "\nEnding Broker while running won't end processes being monitored.\n" "Would you like to switch Broker to run in the background?\n" - "[y/n]: " + "[y/n]: ", + type=click.Choice(["y", "n"]), + default="n" ) - if choice.lower()[0] == "y": + if choice == "y": fork_broker() else: raise exceptions.BrokerError("Broker killed by user.") @@ -400,45 +438,6 @@ def simple_retry(cmd, cmd_args=None, cmd_kwargs=None, max_timeout=60, _cur_timeo simple_retry(cmd, cmd_args, cmd_kwargs, max_timeout, new_wait) -class FileLock: - """Basic file locking class that acquires and releases locks - recommended usage is the context manager which will handle everythign for you - - with FileLock("basic_file.txt") as basic_file: - basic_file.write("some text") - - basic_file is a Path object of the desired file - If a lock is already 
in place, FileLock will wait up to seconds - """ - - def __init__(self, file_name, timeout=10): - self.file = Path(file_name) - self.lock = Path(f"{self.file}.lock") - self.timeout = timeout - - def wait_file(self): - start = time.time() - while self.lock.exists(): - if (time.time() - start) < self.timeout: - time.sleep(1) - continue - else: - raise exceptions.BrokerError( - f"Timeout while attempting to open {self.file.absolute()}" - ) - self.lock.touch() - return self.file - - def return_file(self): - self.lock.unlink() - - def __enter__(self): - return self.wait_file() - - def __exit__(self, *tb_info): - self.return_file() - - class Result: """Dummy result class for presenting results in dot access""" diff --git a/broker/hosts.py b/broker/hosts.py index 2145085e..2ec5613d 100644 --- a/broker/hosts.py +++ b/broker/hosts.py @@ -102,16 +102,15 @@ def execute(self, command, timeout=None): return res def to_dict(self): + keep_keys = ( + "hostname", "_broker_provider", "_broker_args", "tower_inventory", "job_id", "_attrs" + ) ret_dict = { - "hostname": self.hostname, "name": getattr(self, "name", None), - "_broker_provider": self._broker_provider, "_broker_provider_instance": self._prov_inst.instance, "type": "host", - "_broker_args": self._broker_args, } - if hasattr(self, "tower_inventory"): - ret_dict["tower_inventory"] = self.tower_inventory + ret_dict.update({k: v for k, v in self.__dict__.items() if k in keep_keys}) return ret_dict def setup(self): diff --git a/broker/providers/__init__.py b/broker/providers/__init__.py index fe91e616..53e815f0 100644 --- a/broker/providers/__init__.py +++ b/broker/providers/__init__.py @@ -1,7 +1,8 @@ +import inspect from abc import ABCMeta, abstractmethod -import dynaconf from pathlib import Path +import dynaconf from broker import exceptions from broker.settings import settings from logzero import logger @@ -18,6 +19,8 @@ PROVIDERS = {} # action: (InterfaceClass, "method_name") PROVIDER_ACTIONS = {} +# action: 
(InterfaceClass, "method_name") +PROVIDER_HELP = {} class ProviderMeta(ABCMeta): @@ -29,18 +32,44 @@ def __new__(cls, name, bases, attrs): if name != "Provider": PROVIDERS[name] = new_cls logger.debug(f"Registered provider {name}") - for attr in attrs.values(): - if hasattr(attr, "_as_action"): - for action in attr._as_action: - PROVIDER_ACTIONS[action] = (new_cls, attr.__name__) - logger.debug(f"Registered action {action} for provider {name}") + for attr, obj in attrs.items(): + if attr == "provider_help": + # register the help options based on the function arguments + for name, param in inspect.signature(obj).parameters.items(): + if name not in ("self", "kwargs"): + # {name: (cls, is_flag)} + PROVIDER_HELP[name] = (new_cls, isinstance(param.default, bool)) + logger.debug( + f"Registered help option {name} for provider {name}" + ) + elif hasattr(obj, "_as_action"): + for action in obj._as_action: + PROVIDER_ACTIONS[action] = (new_cls, attr) + logger.debug( + f"Registered action {action} for provider {name}" + ) return new_cls + class Provider(metaclass=ProviderMeta): + """ + Abstract base class for all providers. + + This class should be subclassed by all provider implementations. It provides a + metaclass that registers provider classes and actions. + + Attributes: + _validators (list): A list of Dynaconf Validators specific to the provider. + hidden (bool): A flag to hide the provider from the CLI. + _checkout_options (list): A list of checkout options to add to each command. + _execute_options (list): A list of execute options to add to each command. + _fresh_settings (dynaconf.Dynaconf): A clone of the global settings object. + _sensitive_attrs (list): A list of sensitive attributes that should not be logged. 
+ """ # Populate with a list of Dynaconf Validators specific to your provider _validators = [] - # Set to true if you don't want your provider shown in the CLI + # Used to hide the provider from the CLI hidden = False # Populate these to add your checkout and execute options to each command # _checkout_options = [click.option("--workflow", type=str, help="Help text")] @@ -112,7 +141,13 @@ def construct_host(self, host_cls, provider_params, **kwargs): return host_inst @abstractmethod - def nick_help(self): + def provider_help(self): + """These are the help options that will be added to the CLI + + Anything other than 'self' and 'kwargs' will be added as a help option + To specify a flag, set the default value to a boolean + Everything else should default to None + """ pass @abstractmethod @@ -135,6 +170,14 @@ def __repr__(self): ) return f"{self.__class__.__name__}({inner})" + @staticmethod + def auto_hide(cls): + """Decorator to hide providers from the CLI""" + if not settings.get(cls.__name__.upper(), False): + # import IPython; IPython.embed() + cls.hidden = True + return cls + @staticmethod def register_action(*as_names): """Decorator to register a provider action diff --git a/broker/providers/ansible_tower.py b/broker/providers/ansible_tower.py index 40d24f0b..db709362 100644 --- a/broker/providers/ansible_tower.py +++ b/broker/providers/ansible_tower.py @@ -77,8 +77,8 @@ def get_awxkit_and_uname( return versions.v2.get(), my_username +@Provider.auto_hide class AnsibleTower(Provider): - _validators = [ Validator("ANSIBLETOWER.release_workflow", default="remove-vm"), Validator("ANSIBLETOWER.extend_workflow", default="extend-vm"), @@ -559,7 +559,7 @@ def get_inventory(self, user=None): for inv in invs: inv_hosts = inv.get_related("hosts", page_size=200).results hosts.extend(inv_hosts) - with click.progressbar(hosts, label='Compiling host information') as hosts_bar: + with click.progressbar(hosts, label="Compiling host information") as hosts_bar: 
compiled_host_info = [self._compile_host_info(host) for host in hosts_bar] return compiled_host_info @@ -572,7 +572,7 @@ def extend(self, target_vm, new_expire_time=None): if new_inv := target_vm._broker_args.get("tower_inventory"): if new_inv != self._inventory: self._inventory = new_inv - if hasattr(self.__dict__, 'inventory'): + if hasattr(self.__dict__, "inventory"): del self.inventory # clear the cached value return self.execute( workflow=settings.ANSIBLETOWER.extend_workflow, @@ -581,11 +581,20 @@ def extend(self, target_vm, new_expire_time=None): or settings.ANSIBLETOWER.get("new_expire_time"), ) - @Provider.register_action("template", "inventory") - def nick_help(self, **kwargs): + def provider_help( + self, + workflows=False, + workflow=None, + job_templates=False, + job_template=None, + templates=False, + inventories=False, + inventory=None, + **kwargs, + ): """Get a list of extra vars and their defaults from a workflow""" results_limit = kwargs.get("results_limit", settings.ANSIBLETOWER.results_limit) - if workflow := kwargs.get("workflow"): + if workflow: wfjt = self.v2.workflow_job_templates.get(name=workflow).results.pop() default_inv = self.v2.inventory.get(id=wfjt.inventory).results.pop() logger.info( @@ -593,7 +602,7 @@ def nick_help(self, **kwargs): f"Accepted additional nick fields:\n{helpers.yaml_format(wfjt.extra_vars)}" f"tower_inventory: {default_inv['name']}" ) - elif kwargs.get("workflows"): + elif workflows: workflows = [ workflow.name for workflow in self.v2.workflow_job_templates.get( @@ -606,11 +615,11 @@ def nick_help(self, **kwargs): workflows = workflows if isinstance(workflows, list) else [workflows] workflows = "\n".join(workflows[:results_limit]) logger.info(f"Available workflows:\n{workflows}") - elif inventory := kwargs.get("inventory"): + elif inventory: inv = self.v2.inventory.get(name=inventory, kind="").results.pop() inv = {"Name": inv.name, "ID": inv.id, "Description": inv.description} logger.info(f"Accepted additional nick 
fields:\n{helpers.yaml_format(inv)}") - elif kwargs.get("inventories"): + elif inventories: inv = [ inv.name for inv in self.v2.inventory.get(kind="", page_size=1000).results @@ -620,7 +629,7 @@ def nick_help(self, **kwargs): inv = inv if isinstance(inv, list) else [inv] inv = "\n".join(inv[:results_limit]) logger.info(f"Available Inventories:\n{inv}") - elif job_template := kwargs.get("job_template"): + elif job_template: jt = self.v2.job_templates.get(name=job_template).results.pop() default_inv = self.v2.inventory.get(id=jt.inventory).results.pop() logger.info( @@ -628,7 +637,7 @@ def nick_help(self, **kwargs): f"Accepted additional nick fields:\n{helpers.yaml_format(jt.extra_vars)}" f"tower_inventory: {default_inv['name']}" ) - elif kwargs.get("job_templates"): + elif job_templates: job_templates = [ job_template.name for job_template in self.v2.job_templates.get(page_size=1000).results @@ -636,10 +645,14 @@ def nick_help(self, **kwargs): ] if res_filter := kwargs.get("results_filter"): job_templates = eval_filter(job_templates, res_filter, "res") - job_templates = job_templates if isinstance(job_templates, list) else [job_templates] + job_templates = ( + job_templates + if isinstance(job_templates, list) + else [job_templates] + ) job_templates = "\n".join(job_templates[:results_limit]) logger.info(f"Available job templates:\n{job_templates}") - elif kwargs.get("templates"): + elif templates: templates = list( { tmpl @@ -654,8 +667,6 @@ def nick_help(self, **kwargs): templates = templates if isinstance(templates, list) else [templates] templates = "\n".join(templates[:results_limit]) logger.info(f"Available templates:\n{templates}") - else: - logger.warning("That action is not yet implemented.") def release(self, name, broker_args=None): if broker_args is None: diff --git a/broker/providers/beaker.py b/broker/providers/beaker.py new file mode 100644 index 00000000..3749d44e --- /dev/null +++ b/broker/providers/beaker.py @@ -0,0 +1,156 @@ +import click +import 
@Provider.auto_hide
class Beaker(Provider):
    """Beaker provider, implemented on top of the ``beaker-client`` CLI bind.

    Hosts are checked out by submitting a job XML file or cloning an existing
    job id and waiting for the recipe to be reserved; checkin cancels the job.
    """

    _validators = [
        Validator("beaker.hub_url", must_exist=True),
        Validator("beaker.max_job_wait", default="24h"),
    ]
    _checkout_options = [
        click.option(
            "--job-xml",
            type=click.Path(exists=True, dir_okay=False),
            help="Path to the job XML file to submit",
        ),
        click.option(
            "--job-id",
            type=str,
            help="Beaker job ID to clone",
        ),
    ]
    _execute_options = [
        click.option(
            "--job-xml",
            type=str,
            help="Path to the job XML file to submit",
        ),
    ]
    _extend_options = [
        click.option(
            "--extend-duration",
            type=click.IntRange(1, 99),
            help="Number of hours to extend the job. Must be between 1 and 99",
        )
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.hub_url = settings.beaker.hub_url
        # Tests inject a stub bind class through the "bind" kwarg
        self.runtime = kwargs.pop("bind", BeakerBind)(self.hub_url, **kwargs)

    def _host_release(self):
        """Release the host this bound method is attached to.

        NOTE(review): relies on the calling frame having a local named
        ``host`` (the Host wrapper convention used by other providers).
        """
        caller_host = inspect.stack()[1][0].f_locals["host"]
        if not (job_id := getattr(caller_host, "job_id", None)):
            # Host object doesn't know its job; ask Beaker by hostname
            job_id = self.runtime.jobid_from_system(caller_host.hostname)
        return self.release(caller_host.hostname, job_id)

    def _set_attributes(self, host_inst, broker_args=None, misc_attrs=None):
        """Attach provider bookkeeping attributes to a constructed host."""
        host_inst.__dict__.update(
            {
                "_prov_inst": self,
                "_broker_provider": "Beaker",
                "_broker_provider_instance": self.instance,
                "_broker_args": broker_args,
                "release": self._host_release,
            }
        )
        if isinstance(misc_attrs, dict):
            host_inst._attrs = misc_attrs

    def _compile_host_info(self, host, broker_info=True):
        """Compile host information into a dictionary suitable for the inventory.

        :param host: hostname of the Beaker system to look up

        :param broker_info: when True, include broker bookkeeping fields

        :return: a dictionary containing the compiled host information
        """
        curated_host_info = self.runtime.system_details_curated(host)
        if broker_info:
            curated_host_info.update(
                {
                    "_broker_provider": "Beaker",
                    "_broker_provider_instance": self.instance,
                    # host is a hostname string here, so this normally
                    # falls back to {} — kept for parity with other providers
                    "_broker_args": getattr(host, "_broker_args", {}),
                }
            )
        if not curated_host_info.get("job_id"):
            curated_host_info["job_id"] = self.runtime.jobid_from_system(
                curated_host_info["hostname"]
            )
        return curated_host_info

    def construct_host(self, provider_params, host_classes, **kwargs):
        """Construct a broker host from beaker system information

        :param provider_params: a beaker system information dictionary

        :param host_classes: mapping of host type name to host class

        :return: constructed broker host object
        """
        logger.debug(
            f"constructing with {provider_params=}\n{host_classes=}\n{kwargs=}"
        )
        if not provider_params:
            # Checked-in/synced host: build from the stored broker args alone
            host_inst = host_classes[kwargs.get("type", "host")](**kwargs)
            self._set_attributes(host_inst, broker_args=kwargs)
        else:
            host_info = self._compile_host_info(
                provider_params["hostname"], broker_info=False
            )
            host_inst = host_classes[kwargs.get("type", "host")](**provider_params)
            self._set_attributes(host_inst, broker_args=kwargs, misc_attrs=host_info)
        return host_inst

    @Provider.register_action("job_xml", "job_id")
    def submit_job(self, max_wait=None, **kwargs):
        """Submit a job XML file, or clone an existing job id, and wait for it.

        :param max_wait: maximum time to wait for the job
            (defaults to settings.beaker.max_job_wait)

        :return: curated information about the completed job
        """
        job = kwargs.get("job_xml") or kwargs.get("job_id")
        max_wait = max_wait or settings.beaker.get("max_job_wait")
        result = self.runtime.execute_job(job, max_wait)
        logger.debug(f"Job completed with results: {result}")
        return result

    def provider_help(self, jobs=False, job=None, **kwargs):
        """Print Beaker provider information.

        :param jobs: when True, list the user's jobs

        :param job: a job id whose XML should be displayed
        """
        # Bug fix: this previously read settings.container.results_limit
        # (copy-paste from the Container provider). Prefer a beaker-specific
        # setting, falling back to the container value for compatibility.
        results_limit = kwargs.get(
            "results_limit",
            settings.beaker.get("results_limit", settings.container.results_limit),
        )
        if job:
            if not job.startswith("J:"):
                job = f"J:{job}"
            logger.info(self.runtime.job_clone(job, prettyxml=True, dryrun=True).stdout)
        elif jobs:
            result = self.runtime.job_list(**kwargs).stdout.splitlines()
            if res_filter := kwargs.get("results_filter"):
                result = helpers.eval_filter(result, res_filter, "res")
            result = "\n".join(result[:results_limit])
            logger.info(f"Available jobs:\n{result}")

    def release(self, host_name, job_id):
        """Release a host by cancelling its Beaker job.

        Cancelling the job returns the reserved system, so no separate
        per-system release call is needed.
        """
        return self.runtime.job_cancel(job_id)

    def extend(self, host_name, extend_duration=99):
        """Extend a host's reservation by running extendtesttime.sh on it.

        :param host_name: hostname of the reserved system

        :param extend_duration: hours to extend the reservation (1-99)

        :raises ProviderError: when the remote command fails
        """
        try:
            Host(hostname=host_name).execute(
                f"/usr/bin/extendtesttime.sh {extend_duration}"
            )
        except BrokerError as err:
            # Chain the cause so the original failure isn't lost
            raise ProviderError(
                f"Failed to extend host {host_name}: {err}\n"
                f"Try running: root@{host_name} /usr/bin/extendtesttime.sh {extend_duration}"
            ) from err

    def get_inventory(self, *args):
        """Compile inventory information for all of the user's Beaker systems."""
        hosts = self.runtime.user_systems()
        with click.progressbar(hosts, label="Compiling host information") as hosts_bar:
            compiled_host_info = [self._compile_host_info(host) for host in hosts_bar]
        return compiled_host_info
+ f"Information for {container_host} container-host:\n" + f"{helpers.yaml_format(self.runtime.image_info(container_host))}" ) - elif kwargs.get("container_hosts"): + elif container_hosts: images = [ img.tags[0] for img in self.runtime.images @@ -229,7 +232,7 @@ def nick_help(self, **kwargs): images = images if isinstance(images, list) else [images] images = "\n".join(images[:results_limit]) logger.info(f"Available host images:\n{images}") - elif kwargs.get("container_apps"): + elif container_apps: images = [img.tags[0] for img in self.runtime.images if img.tags] if res_filter := kwargs.get("results_filter"): images = helpers.eval_filter(images, res_filter, "res") @@ -260,9 +263,9 @@ def run_container(self, container_host, **kwargs): kwargs["name"] = self._gen_name() kwargs["ports"] = self._port_mapping(container_host, **kwargs) - envars = kwargs.get('environment', {}) + envars = kwargs.get("environment", {}) if isinstance(envars, str): - envars = {var.split('=')[0]: var.split('=')[1] for var in envars.split(',')} + envars = {var.split("=")[0]: var.split("=")[1] for var in envars.split(",")} # add some context information about the container's requester origin = helpers.find_origin() diff --git a/broker/providers/test_provider.py b/broker/providers/test_provider.py index 33344f0b..ec3771f9 100644 --- a/broker/providers/test_provider.py +++ b/broker/providers/test_provider.py @@ -67,8 +67,8 @@ def extend(self): def get_inventory(self, *args, **kwargs): return helpers.load_inventory( - filter=f"_broker_provider={self.__class__.__name__}" + filter=f'@inv._broker_provider == "{self.__class__.__name__}"' ) - def nick_help(self): + def provider_help(self): pass diff --git a/setup.cfg b/setup.cfg index c3e35152..ef47dd26 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,6 +41,7 @@ docker = docker paramiko podman = podman-py +beaker = beaker-client [options.entry_points] console_scripts = diff --git a/tests/data/beaker/job_result.json b/tests/data/beaker/job_result.json new 
file mode 100644 index 00000000..104c0317 --- /dev/null +++ b/tests/data/beaker/job_result.json @@ -0,0 +1,68 @@ +{ + "id": "1234567", + "owner": "test_user@testdom.com", + "result": "Pass", + "status": "Running", + "retention_tag": "scratch", + "whiteboard": { + "text": "Test Reserve Workflow" + }, + "recipeSet": { + "priority": "Normal", + "response": "ack", + "id": "7654321", + "recipe": { + "id": "1029384", + "job_id": "1234567", + "recipe_set_id": "7654321", + "result": "Pass", + "status": "Running", + "distro": "RHEL-8.7.0", + "arch": "x86_64", + "family": "RedHatEnterpriseLinux8", + "variant": "BaseOS", + "system": "fake.host.testdom.com", + "distroRequires": { + "and": { + "distro_family": { + "op": "=", + "value": "RedHatEnterpriseLinux8" + }, + "distro_variant": { + "op": "=", + "value": "BaseOS" + }, + "distro_name": { + "op": "=", + "value": "RHEL-8.7.0" + }, + "distro_arch": { + "op": "=", + "value": "x86_64" + } + } + }, + "hostRequires": { + "system_type": { + "value": "Machine" + } + }, + "logs": { + "log": [ + { + "href": "https: //beakerhost.testdom.com/recipes/1029384/logs/console.log", + "name": "console.log" + }, + { + "href": "https: //beakerhost.testdom.com/recipes/1029384/logs/anaconda.log", + "name": "anaconda.log" + }, + { + "href": "https: //beakerhost.testdom.com/recipes/1029384/logs/sys.log", + "name": "sys.log" + } + ] + } + } + } +} diff --git a/tests/data/beaker/test_job.xml b/tests/data/beaker/test_job.xml new file mode 100644 index 00000000..8a3212cb --- /dev/null +++ b/tests/data/beaker/test_job.xml @@ -0,0 +1,26 @@ + + Test Reserve Workflow + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/cli_scenarios/beaker/checkout_test_job-2.yaml b/tests/data/cli_scenarios/beaker/checkout_test_job-2.yaml new file mode 100644 index 00000000..3b392214 --- /dev/null +++ b/tests/data/cli_scenarios/beaker/checkout_test_job-2.yaml @@ -0,0 +1,2 @@ +job_xml: "tests/data/beaker/test_job.xml" +count: 2 diff --git 
from pathlib import Path
from tempfile import NamedTemporaryFile

import pytest
from click.testing import CliRunner

from broker import Broker
from broker.commands import cli
from broker.providers.beaker import Beaker
from broker.settings import inventory_path, settings_path

SCENARIO_DIR = Path("tests/data/cli_scenarios/beaker")


@pytest.fixture(scope="module", autouse=True)
def skip_if_not_configured():
    """Skip every test in this module unless a Beaker provider can be built."""
    try:
        Beaker()
    except Exception as err:
        pytest.skip(f"Beaker is not configured correctly: {err}")


@pytest.fixture(scope="module")
def temp_inventory():
    """Stash the local inventory aside, then restore it when the module ends."""
    stashed = inventory_path.rename(f"{inventory_path.absolute()}.bak")
    yield
    # Return anything this module checked out before restoring the stash
    CliRunner().invoke(
        cli, ["checkin", "--all", "--filter", '@inv._broker_provider == "Beaker"']
    )
    inventory_path.unlink()
    stashed.rename(inventory_path)


@pytest.mark.parametrize(
    "args_file",
    [scenario for scenario in SCENARIO_DIR.iterdir() if scenario.name.startswith("checkout_")],
)
def test_checkout_scenarios(args_file, temp_inventory):
    """Every checkout_* scenario file should check out cleanly via the CLI."""
    outcome = CliRunner().invoke(cli, ["checkout", "--args-file", args_file])
    assert outcome.exit_code == 0


def test_inventory_sync():
    """Syncing the inventory against Beaker should succeed."""
    outcome = CliRunner().invoke(cli, ["inventory", "--sync", "Beaker"])
    assert outcome.exit_code == 0


def test_jobs_list():
    """Listing the user's Beaker jobs through the providers command should succeed."""
    runner = CliRunner(mix_stderr=False)
    outcome = runner.invoke(cli, ["providers", "Beaker", "--jobs", "--mine"])
    assert outcome.exit_code == 0
import json
from pathlib import Path

import pytest

from broker.binds.beaker import _curate_job_info
from broker.helpers import MockStub
from broker.hosts import Host
from broker.providers.beaker import Beaker


class BeakerBindStub(MockStub):
    """Stub standing in for the Beaker bind in provider unit tests.

    Implements the bind surface the provider actually calls:
    - jobid_from_system(hostname)
    - system_details_curated(host)
    - execute_job(job_xml, max_wait)
    - user_systems()
    Remaining calls (release, job_cancel, job_clone, job_list,
    system_release) fall through to MockStub's no-op behavior.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.job_id = "1234567"
        self.stdout = "1234567\n7654321\n"

    def jobid_from_system(self, hostname):
        return self.job_id

    def system_details_curated(self, host):
        return {
            "hostname": "test.example.com",
            "job_id": self.job_id,
            "mac_address": "00:00:00:00:00:00",
            "owner": "testuser ",
            "id": "7654321",
            "reservation_id": "1267",
            "reserved_on": "2023-01-01 00:00:00",
            "expires_on": "2025-01-01 00:00:00",
            "reserved_for": "anotheruser "
        }

    def execute_job(self, job_xml, max_wait):
        raw_result = Path("tests/data/beaker/job_result.json").read_text()
        return _curate_job_info(json.loads(raw_result))

    def user_systems(self):
        return ["test.example.com", "test2.example.com"]


@pytest.fixture
def bind_stub():
    return BeakerBindStub()


@pytest.fixture
def beaker_stub(bind_stub):
    return Beaker(bind=bind_stub)


def test_empty_init():
    assert Beaker()


def test_host_creation(beaker_stub):
    """Submitting the sample job and constructing a host should yield a Host."""
    submitted = beaker_stub.submit_job("tests/data/beaker/test_job.xml")
    new_host = beaker_stub.construct_host(submitted, {"host": Host})
    assert isinstance(new_host, Host)
    assert new_host.hostname == "fake.host.testdom.com"
def test_dict_from_paths_nested():
    """dict_from_paths pulls values from nested dicts via /-separated paths."""
    nested = {
        "person": {
            "name": "John",
            "age": 30,
            "address": {
                "street": "123 Main St",
                "city": "Anytown",
                "state": "CA",
                "zip": "12345",
            },
        }
    }
    wanted = {
        "person_name": "person/name",
        "person_zip": "person/address/zip",
    }
    extracted = helpers.dict_from_paths(nested, wanted)
    assert extracted == {"person_name": "John", "person_zip": "12345"}