diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 944cef64f5f..3cb3a4d5668 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,8 +46,11 @@ jobs: - name: Autobuild uses: github/codeql-action/autobuild@v3 - - name: Cache cleanup + - name: Pip cache cleanup shell: bash + # CODEQL_PYTHON is only defined if dependencies were installed [0] + # [0] https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning?learn=code_security_actions&learnProduct=code-security#analyzing-python-dependencies + if: ${{ env.CODEQL_PYTHON != '' }} run: | $CODEQL_PYTHON -m pip cache info $CODEQL_PYTHON -m pip cache purge diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 0bca607ab25..2e5f305ca51 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -92,7 +92,7 @@ jobs: echo "pip_cache_dir=$(python -m pip cache dir)" | tee -a $GITHUB_OUTPUT - name: Wheels cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }}-${{ github.sha }} @@ -124,19 +124,21 @@ jobs: run: git tag -d 'v0.0.0.0' - name: Upload Documentation - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Documentation-${{matrix.pnl-version}}-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} retention-days: 1 path: docs/build/html - name: Store PR number - if: ${{ github.event_name == 'pull_request' }} + # The 'base' variant runs only on pull requests and has only one job + if: ${{ matrix.pnl-version == 'base' }} run: echo ${{ github.event.pull_request.number }} > ./pr_number.txt - name: Upload PR number for other workflows - if: ${{ github.event_name == 'pull_request' }} - uses: actions/upload-artifact@v3 + # The 'base' variant runs only on pull requests and has only one job + if: ${{ matrix.pnl-version == 'base' }} + uses: actions/upload-artifact@v4 with: name: pr_number path: ./pr_number.txt @@ -168,7 +170,7 @@ jobs: ref: gh-pages - name: Download branch docs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 path: _built_docs/${{ github.ref }} @@ -185,7 +187,7 @@ jobs: if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' - name: Download main docs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 # This overwrites files in current directory diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 34dd87b598e..83834f73be4 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -143,7 +143,7 @@ jobs: echo "pip_cache_dir=$(python -m pip cache dir)" | tee -a $GITHUB_OUTPUT - name: Wheels cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'dev_requirements.txt') }}-${{ github.sha }} @@ -163,22 +163,28 @@ jobs: # exit-zero treats all errors as warnings. 
The GitHub editor is 127 chars wide flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Print test machine/env info + - name: Print numpy info shell: bash run: | python -c "import numpy; numpy.show_config()" + + - name: Print machine info + shell: bash + run: | case "$RUNNER_OS" in - Linux*) lscpu;; + Linux*) lscpu; lsmem;; + macOS*) sysctl -a | grep '^hw' ;; + Windows*) wmic cpu get description,currentclockspeed,NumberOfCores,NumberOfEnabledCore,NumberOfLogicalProcessors; wmic memorychip get capacity,speed,status,manufacturer ;; esac - name: Test with pytest timeout-minutes: 180 - run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto ${{ matrix.extra-args }} + run: pytest --junit-xml=tests_out.xml --verbosity=0 -n logical ${{ matrix.extra-args }} - name: Upload test results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} + name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }} path: tests_out.xml retention-days: 5 if: (success() || failure()) && ! contains(matrix.extra-args, 'forked') @@ -202,7 +208,8 @@ jobs: python setup.py sdist bdist_wheel - name: Upload dist packages - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 + if: matrix.version-restrict == '' with: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 45cacf39c88..8df8c2bfef2 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -38,7 +38,7 @@ jobs: echo "wheel=$(ls *.whl)" >> $GITHUB_OUTPUT - name: Upload Python dist files - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Python-dist-files path: dist/ @@ -78,7 +78,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ @@ -126,7 +126,7 @@ jobs: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto tests - name: Upload test results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: test-results-${{ matrix.os }}-${{ matrix.python-version }} path: tests_out.xml @@ -141,7 +141,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ @@ -175,7 +175,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index e72e65d7a52..eb6372f61a2 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -29,3 +29,15 @@ cattrs != 23.1.1; python_version < '3.8' # cattrs==23.2.{1,2} breaks json serialization # https://github.com/python-attrs/cattrs/issues/453 cattrs != 23.2.1, != 23.2.2 + +# The following need at least sphinx-5 without indicating it in dependencies: +# * sphinxcontrib-applehelp >=1.0.8, +# * sphinxcontrib-devhelp >=1.0.6, +# * sphinxcontrib-htmlhelp >=2.0.5, +# * sphinxcontrib-serializinghtml >=1.1.10, +# * sphinxcontrib-qthelp >=1.0.7 +sphinxcontrib-applehelp <1.0.8 +sphinxcontrib-devhelp <1.0.6 +sphinxcontrib-htmlhelp <2.0.5 +sphinxcontrib-serializinghtml <1.1.10 +sphinxcontrib-qthelp <1.0.7 
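[reviewer note] The sphinxcontrib pins above only take effect if this file is actually consumed at install time. A minimal sketch of how a file like this is typically applied — assuming (this patch does not show it) that broken_trans_deps.txt is passed to pip as a constraints file next to the regular requirements; the exact invocation and the doc_requirements.txt pairing are illustrative, not taken from this diff:

    # Hypothetical invocation. A constraints file (-c) never installs anything
    # on its own; it only caps the versions of packages that other requirements
    # pull in transitively, e.g. the sphinxcontrib-* helpers dragged in by
    # sphinx via the docs requirements.
    pip install -e . -r doc_requirements.txt -c broken_trans_deps.txt
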
diff --git a/cuda_requirements.txt b/cuda_requirements.txt index 63e22850e71..3a4f02b4cc9 100644 --- a/cuda_requirements.txt +++ b/cuda_requirements.txt @@ -1 +1 @@ -pycuda >2018, <2023 +pycuda >2018, <2024 diff --git a/dev_requirements.txt b/dev_requirements.txt index e992d1087d4..3683bd99fa2 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<24.0 -pytest<7.4.4 +pytest<8.0.1 pytest-benchmark<4.0.1 pytest-cov<4.1.1 pytest-forked<1.7.0 diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index d1b2502ef18..224ac4d2aed 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1301,7 +1301,7 @@ def _get_compilation_state(self): "intensity"} # Prune subcomponents (which are enabled by type rather than a list) # that should be omitted - blacklist = { "objective_mechanism", "agent_rep", "projections"} + blacklist = { "objective_mechanism", "agent_rep", "projections", "shadow_inputs"} # Only mechanisms use "value" state, can execute 'until finished', # and need to track executions @@ -1426,7 +1426,8 @@ def _get_compilation_params(self): "randomization_dimension", "save_values", "save_samples", "max_iterations", "duplicate_keys", "search_termination_function", "state_feature_function", - "search_function", + "search_function", "weight", "exponent", "gating_signal_params", + "retain_old_simulation_data", # not used in compiled learning "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 35d955ee435..564dcc6a73d 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -1442,7 +1442,8 @@ def _function(self, elif operation == CROSS_ENTROPY: v1 = variable[0] v2 = variable[1] - combination = np.where(np.logical_and(v1 == 0, v2 == 0), 0.0, v1 * np.log(v2)) + both_zero = np.logical_and(v1 == 0, v2 == 0) + combination = v1 * np.where(both_zero, 0.0, np.log(v2, where=np.logical_not(both_zero))) else: raise FunctionError("Unrecognized operator ({0}) for LinearCombination function". 
format(operation.self.Operation.SUM)) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index b0d2145028b..96ae2c45292 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -26,6 +26,7 @@ import numpy as np from beartype import beartype +from scipy.special import erfinv from psyneulink._typing import Optional @@ -371,11 +372,6 @@ def _function(self, params=None, ): - try: - from scipy.special import erfinv - except: - raise FunctionError("The UniformToNormalDist function requires the SciPy package.") - mean = self._get_current_parameter_value(DIST_MEAN, context) standard_deviation = self._get_current_parameter_value(STANDARD_DEVIATION, context) random_state = self.parameters.random_state._get(context) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 86c5523d786..b54af944c10 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -806,7 +806,7 @@ def progress_callback(study, trial): optuna.logging.set_verbosity(optuna.logging.WARNING) study = optuna.create_study( - sampler=self.method, direction=self.direction + sampler=opt_func, direction=self.direction ) study.optimize( objfunc_wrapper_wrapper, diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 190c0b764e4..bdb5d072c18 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -1207,7 +1207,8 @@ def _function(self, # MODIFIED CW 3/20/18: avoid divide by zero error by plugging in two zeros # FIX: unsure about desired behavior when v2 = 0 and v1 != 0 # JDC: returns [inf]; leave, and let it generate a warning or error message for user - result = -np.sum(np.where(np.logical_and(v1 == 0, v2 == 0), 0.0, v1 * np.log(v2))) + both_zero = np.logical_and(v1 == 0, v2 == 0) + result = -np.sum(v1 * np.where(both_zero, 0.0, np.log(v2, where=np.logical_not(both_zero)))) # Energy elif self.metric == ENERGY: diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index bdd7c81d369..c87886b5c52 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -301,7 +301,7 @@ def reset(self, previous_value=None, context=None): if previous_value is None: previous_value = self._get_current_parameter_value("initializer", context) - if previous_value is None or previous_value == []: + if previous_value is None or np.asarray(previous_value).size == 0: self.parameters.previous_value._get(context).clear() value = deque([], maxlen=self.parameters.history.get(context)) @@ -1752,7 +1752,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], field_weights = self._get_current_parameter_value('distance_field_weights', context) # Set any items in field_weights to None if they are None or an empty list: field_weights = np.atleast_1d([None if - fw is None or fw == [] or isinstance(fw, np.ndarray) and fw.tolist()==[] + fw is None or np.asarray(fw).size == 0 else fw 
for fw in field_weights]) if granularity == 'per_field': @@ -1763,7 +1763,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], if len(field_weights)==1: field_weights = np.full(num_fields, field_weights[0]) for i in range(num_fields): - if not any([item is None or item == [] or isinstance(item, np.ndarray) and item.tolist() == [] + if not any([item is None or np.asarray(item).size == 0 for item in [cue[i], candidate[i], field_weights[i]]]): distances_by_field[i] = distance_fct([cue[i], candidate[i]]) * field_weights[i] return list(distances_by_field) @@ -2623,7 +2623,7 @@ def reset(self, previous_value=None, context=None): if previous_value is None: previous_value = self._get_current_parameter_value("initializer", context) - if previous_value == []: + if np.asarray(previous_value).size == 0: value = np.ndarray(shape=(2, 0, len(self.defaults.variable[0]))) self.parameters.previous_value._set(copy.deepcopy(value), context) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index a019eff7668..3a4bafca984 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -3053,7 +3053,7 @@ def _gen_llvm_output_port_parse_variable(self, ctx, builder, if name == OWNER_VALUE: data = value elif name in self.llvm_state_ids: - data = pnlvm.helpers.get_state_ptr(builder, self, mech_state, name) + data = ctx.get_param_or_state_ptr(builder, self, name, state_struct_ptr=mech_state) else: data = None diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 2c0559a93f4..3fd505d122d 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -3358,13 +3358,15 @@ def _gen_llvm_evaluate_alloc_range_function(self, *, ctx:pnlvm.LLVMBuilderContex nodes_params = pnlvm.helpers.get_param_ptr(builder, self.composition, params, "nodes") - my_idx = self.composition._get_node_index(self) - my_params = builder.gep(nodes_params, [ctx.int32_ty(0), - ctx.int32_ty(my_idx)]) - num_trials_per_estimate_ptr = pnlvm.helpers.get_param_ptr(builder, self, - my_params, "num_trials_per_estimate") + controller_idx = self.composition._get_node_index(self) + controller_params = builder.gep(nodes_params, + [ctx.int32_ty(0), ctx.int32_ty(controller_idx)]) + num_trials_per_estimate_ptr = ctx.get_param_or_state_ptr(builder, + self, + "num_trials_per_estimate", + param_struct_ptr=controller_params) func_params = pnlvm.helpers.get_param_ptr(builder, self, - my_params, "function") + controller_params, "function") search_space = pnlvm.helpers.get_param_ptr(builder, self.function, func_params, "search_space") @@ -3428,7 +3430,7 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz assert self.composition.controller is self assert self.composition is self.agent_rep nodes_states = pnlvm.helpers.get_state_ptr(builder, self.composition, - comp_state, "nodes", None) + comp_state, "nodes") nodes_params = pnlvm.helpers.get_param_ptr(builder, self.composition, comp_params, "nodes") @@ -3442,15 +3444,16 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz assert len(self.output_ports) == len(allocation_sample.type.pointee) controller_out = builder.gep(comp_data, [ctx.int32_ty(0), 
ctx.int32_ty(0), ctx.int32_ty(controller_idx)]) - all_op_state = pnlvm.helpers.get_state_ptr(builder, self, - controller_state, "output_ports") - all_op_params = pnlvm.helpers.get_param_ptr(builder, self, - controller_params, "output_ports") + all_op_params, all_op_states = ctx.get_param_or_state_ptr(builder, + self, + "output_ports", + param_struct_ptr=controller_params, + state_struct_ptr=controller_state) for i, op in enumerate(self.output_ports): op_idx = ctx.int32_ty(i) op_f = ctx.import_llvm_function(op, tags=frozenset({"simulation"})) - op_state = builder.gep(all_op_state, [ctx.int32_ty(0), op_idx]) + op_state = builder.gep(all_op_states, [ctx.int32_ty(0), op_idx]) op_params = builder.gep(all_op_params, [ctx.int32_ty(0), op_idx]) op_in = builder.alloca(op_f.args[2].type.pointee) op_out = builder.gep(controller_out, [ctx.int32_ty(0), op_idx]) @@ -3483,9 +3486,10 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz # Determine simulation counts - num_trials_per_estimate_ptr = pnlvm.helpers.get_param_ptr(builder, self, - controller_params, - "num_trials_per_estimate") + num_trials_per_estimate_ptr = ctx.get_param_or_state_ptr(builder, + self, + "num_trials_per_estimate", + param_struct_ptr=controller_params) num_trials_per_estimate = builder.load(num_trials_per_estimate_ptr, "num_trials_per_estimate") diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 0298ff4f733..221eac63850 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -443,7 +443,7 @@ **noise** (it must be the same length as the Mechanism's `variable `), in which case each element is applied Hadamard (elementwise) to the result, as shown here:: - >>> my_linear_tm.noise = [1.0,1.2,.9] + >>> my_linear_tm.noise.base = [1.0,1.2,.9] >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[2. 
, 2.2, 1.9]]) diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index fe8f069bae6..2c3f401707b 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -1114,87 +1114,3 @@ def compute_costs(self, intensity, context=None): combined_cost = self.combine_costs_function(all_costs, context=context).astype(float) return max(0.0, combined_cost) - - def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, - extra_args=[], tags:frozenset): - if "costs" in tags: - assert len(extra_args) == 0 - return self._gen_llvm_costs(ctx=ctx, tags=tags) - - return super()._gen_llvm_function(ctx=ctx, extra_args=extra_args, tags=tags) - - def _gen_llvm_costs(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): - args = [ctx.get_param_struct_type(self).as_pointer(), - ctx.get_state_struct_type(self).as_pointer(), - ctx.get_input_struct_type(self).as_pointer()] - - assert "costs" in tags - builder = ctx.create_llvm_function(args, self, str(self) + "_costs", - tags=tags, - return_type=ctx.float_ty) - - params, state, arg_in = builder.function.args - - func_params = pnlvm.helpers.get_param_ptr(builder, self, params, - "function") - func_state = pnlvm.helpers.get_state_ptr(builder, self, state, - "function") - - # FIXME: This allows INTENSITY and NONE - assert self.cost_options & ~CostFunctions.INTENSITY == CostFunctions.NONE - - cfunc = ctx.import_llvm_function(self.function.combine_costs_fct) - cfunc_in = builder.alloca(cfunc.args[2].type.pointee, - name="combine_costs_func_in") - - # Set to 0 by default - builder.store(cfunc_in.type.pointee(None), cfunc_in) - - cost_funcs = 0 - if self.cost_options & CostFunctions.INTENSITY: - ifunc = ctx.import_llvm_function(self.function.intensity_cost_fct) - - ifunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, - func_params, - "intensity_cost_fct") - ifunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, - func_state, - "intensity_cost_fct") - # Port input is struct { data input, modulations } if there are modulations, - # otherwise it's just data_input - if len(self.mod_afferents) > 0: - ifunc_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - else: - ifunc_in = arg_in - # point output to the proper slot in comb func input - assert cost_funcs == 0, "Intensity should be the first cost function!" 
- ifunc_out = builder.gep(cfunc_in, [ctx.int32_ty(0), ctx.int32_ty(cost_funcs)]) - if ifunc_out.type != ifunc.args[3].type: - warnings.warn("Shape mismatch: {} element of combination func input ({}) doesn't match INTENSITY cost output ({})".format( - cost_funcs, self.function.combine_costs_fct.defaults.variable, - self.function.intensity_cost_fct.defaults.value), - pnlvm.PNLCompilerWarning) - assert self.cost_options == CostFunctions.INTENSITY - ifunc_out = cfunc_in - - builder.call(ifunc, [ifunc_params, ifunc_state, ifunc_in, ifunc_out]) - - cost_funcs += 1 - - - # Call combination function - cfunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, - func_params, - "combine_costs_fct") - cfunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, - func_state, - "combine_costs_fct") - cfunc_out = builder.alloca(cfunc.args[3].type.pointee, - name="combine_costs_func_out") - builder.call(cfunc, [cfunc_params, cfunc_state, cfunc_in, cfunc_out]) - - - ret_val = pnlvm.helpers.load_extract_scalar_array_one(builder, cfunc_out) - builder.ret(ret_val) - - return builder.function diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 28588a87c0f..8f5b6db03fd 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -2360,8 +2360,11 @@ def _get_input_struct_type(self, ctx): def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): port_f = ctx.import_llvm_function(self.function) - base_params = pnlvm.helpers.get_param_ptr(builder, self, params, - "function") + base_params, f_state = ctx.get_param_or_state_ptr(builder, + self, + "function", + param_struct_ptr=params, + state_struct_ptr=state) if any(a.sender.modulation != OVERRIDE for a in self.mod_afferents): # Create a local copy of the function parameters only if @@ -2426,12 +2429,13 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, if arg_out.type != port_f.args[3].type: assert len(arg_out.type.pointee) == 1 arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + # Extract the data part of input if len(self.mod_afferents) == 0: f_input = arg_in else: f_input = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - f_state = pnlvm.helpers.get_state_ptr(builder, self, state, "function") + builder.call(port_f, [f_params, f_state, f_input, arg_out]) return builder @@ -2998,7 +3002,7 @@ def _parse_port_spec(port_type=None, port_type_name = port_type.__name__ proj_is_feedback = False - if isinstance(port_specification, tuple) and port_specification[1] == FEEDBACK: + if isinstance(port_specification, tuple) and str(port_specification[1]) == FEEDBACK: port_specification = port_specification[0] proj_is_feedback = True diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index 8ae97232406..cdf46687532 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -1117,8 +1117,11 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, builder.store(builder.load(arg_in), arg_out) return builder - mf_state = pnlvm.helpers.get_state_ptr(builder, self, state, self.parameters.function.name) - mf_params = pnlvm.helpers.get_param_ptr(builder, self, params, self.parameters.function.name) + mf_params, mf_state = ctx.get_param_or_state_ptr(builder, + self, + self.parameters.function, + param_struct_ptr=params, + 
state_struct_ptr=state) main_function = ctx.import_llvm_function(self.function) builder.call(main_function, [mf_params, mf_state, arg_in, arg_out]) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index d898bb8394a..a422db02620 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -954,6 +954,10 @@ def _generate_composition_string(graph, component_identifiers): psyneulink.LearningMechanism, psyneulink.LearningProjection, ) + implicit_roles = ( + psyneulink.NodeRole.LEARNING, + ) + output = [] comp_identifer = parse_valid_identifier(graph.id) @@ -1090,6 +1094,22 @@ def alphabetical_order(items): control_mechanisms = [] implicit_mechanisms = [] + try: + node_roles = { + parse_valid_identifier(node): role for (node, role) in + graph.metadata['required_node_roles'] + } + except KeyError: + node_roles = [] + + try: + excluded_node_roles = { + parse_valid_identifier(node): role for (node, role) in + graph.metadata['excluded_node_roles'] + } + except KeyError: + excluded_node_roles = [] + # add nested compositions and mechanisms in order they were added # to this composition for node in sorted( @@ -1104,10 +1124,19 @@ def alphabetical_order(items): except (AttributeError, KeyError): component_type = default_node_type identifier = parse_valid_identifier(node.id) + + try: + node_role = eval(_parse_parameter_value(node_roles[identifier])) + except (KeyError, TypeError): + node_role = None + if issubclass(component_type, control_mechanism_types): control_mechanisms.append(node) component_identifiers[identifier] = True - elif issubclass(component_type, implicit_types): + elif ( + issubclass(component_type, implicit_types) + or node_role in implicit_roles + ): implicit_mechanisms.append(node) else: mechanisms.append(node) @@ -1166,23 +1195,6 @@ def alphabetical_order(items): if len(compositions) > 0: output.append('') - # generate string to add the nodes to this Composition - try: - node_roles = { - parse_valid_identifier(node): role for (node, role) in - graph.metadata['required_node_roles'] - } - except KeyError: - node_roles = [] - - try: - excluded_node_roles = { - parse_valid_identifier(node): role for (node, role) in - graph.metadata['excluded_node_roles'] - } - except KeyError: - excluded_node_roles = [] - # do not add the controller as a normal node try: controller_name = graph.metadata['controller']['id'] @@ -1383,10 +1395,11 @@ def get_declared_identifiers(model): for i in range(len(comp_strs)): # greedy and non-greedy for cs in comp_strs[i]: - potential_module_names = set([ + cs_potential_names = set([ *re.findall(r'([A-Za-z_\.]+)\.', cs), *re.findall(r'([A-Za-z_\.]+?)\.', cs) ]) + potential_module_names.update(cs_potential_names) for module in potential_module_names: if module not in component_identifiers: @@ -1556,7 +1569,9 @@ def write_mdf_file(compositions, filename: str, path: str = None, fmt: str = Non not specified then the current directory is used. fmt : str - specifies file format of output. Current options ('json', 'yml'/'yaml') + specifies file format of output. Auto-detect based on + **filename** extension if None. 
+ Current options: 'json', 'yml'/'yaml' simple_edge_format : bool specifies use of @@ -1567,8 +1582,8 @@ def write_mdf_file(compositions, filename: str, path: str = None, fmt: str = Non if fmt is None: try: - fmt = re.match(r'(.*)\.(.*)$', filename).groups(1) - except AttributeError: + fmt = re.match(r'(.*)\.(.*)$', filename).groups()[1] + except (AttributeError, IndexError): fmt = 'json' if path is not None: diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index e7f84c62e9e..dd5a384d481 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -820,7 +820,8 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): for idx, node in enumerate(composition._all_nodes): node_state = builder.gep(nodes_states, [ctx.int32_ty(0), ctx.int32_ty(idx)]) - num_exec_locs[node] = helpers.get_state_ptr(builder, node, + num_exec_locs[node] = helpers.get_state_ptr(builder, + node, node_state, "num_executions") @@ -1056,7 +1057,7 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): node_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(idx)]) num_executions_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions") num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(TimeScale.RUN.value)]) - builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) + builder.store(num_exec_time_ptr.type.pointee(None), num_exec_time_ptr) # Allocate and initialize condition structure cond_gen = helpers.ConditionGenerator(ctx, composition) diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index c2fe17f2936..2ce1e6afeab 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -891,10 +891,10 @@ def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m m_val = builder.alloca(mech_out_ty, name="mechanism_out") # Load mechanism parameters - scaling_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, - "scaling_factor_gain") - base_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, - "base_level_gain") + scaling_factor_ptr = ctx.get_param_or_state_ptr(builder, self, "scaling_factor_gain", + param_struct_ptr=m_params) + base_factor_ptr = ctx.get_param_or_state_ptr(builder, self, "base_level_gain", + param_struct_ptr=m_params) # If modulated, parameters are single element array scaling_factor = pnlvm.helpers.load_extract_scalar_array_one(builder, scaling_factor_ptr) diff --git a/psyneulink/library/compositions/compiledoptimizer.py b/psyneulink/library/compositions/compiledoptimizer.py index aea8d4ebd45..20aa5e673ca 100644 --- a/psyneulink/library/compositions/compiledoptimizer.py +++ b/psyneulink/library/compositions/compiledoptimizer.py @@ -90,10 +90,11 @@ def step(self, ctx): name = self._composition.name + "_ADAM_STEP" args = [self._get_optimizer_struct_type(ctx).as_pointer(), + ctx.get_state_struct_type(self._composition).as_pointer(), ctx.get_param_struct_type(self._composition).as_pointer()] builder = ctx.create_llvm_function(args, self, name) llvm_func = builder.function - optim_struct, params = llvm_func.args + optim_struct, state, params = llvm_func.args # setup values zero = ctx.int32_ty(0) @@ -182,7 +183,7 @@ def step(self, ctx): 
pnlvm.helpers.printf_float_matrix(builder, delta_w_ptr, prefix=f"grad val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) # this is messy - #TODO - cleanup this - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) dim_x, dim_y = proj.matrix.shape weight_row = None @@ -233,10 +234,11 @@ def step(self, ctx): name = self._composition.name + "_SGD_STEP" args = [self._get_optimizer_struct_type(ctx).as_pointer(), + ctx.get_state_struct_type(self._composition).as_pointer(), ctx.get_param_struct_type(self._composition).as_pointer()] builder = ctx.create_llvm_function(args, self, name) llvm_func = builder.function - optim_struct, params = llvm_func.args + optim_struct, state, params = llvm_func.args zero = ctx.int32_ty(0) delta_w = builder.gep(optim_struct, [zero, ctx.int32_ty(self._DELTA_W_NUM)]) @@ -246,7 +248,7 @@ def step(self, ctx): # update weights for idx, proj in enumerate(self._pytorch_model.projection_wrappers): delta_w_ptr = builder.gep(delta_w, [zero, ctx.int32_ty(idx)]) - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) multiplied_delta_w = gen_inject_mat_scalar_mult(ctx, builder, delta_w_ptr, lr) gen_inject_mat_sub(ctx, builder, weights_llvmlite, multiplied_delta_w, weights_llvmlite) diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 7bf3f8f7579..f94b017e6dd 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -377,7 +377,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): efferent_node = proj.receiver efferent_node_error = error_dict[efferent_node] - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) if proj_idx == 0: gen_inject_vxm_transposed( @@ -406,7 +406,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(proj.sender._idx), ctx.int32_ty(0)]) # get dimensions of weight matrix - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) # update delta_W node_delta_w = builder.gep(delta_w, [ctx.int32_ty(0), ctx.int32_ty(proj._idx)]) @@ -454,7 +454,7 @@ def _gen_llvm_training_function_body(self, ctx, builder, state, params, data): builder.call(optimizer_zero_grad, [optimizer_struct]) builder.call(backprop, [state, params, data, optimizer_struct]) - builder.call(optimizer_step_f, [optimizer_struct, params]) + builder.call(optimizer_step_f, [optimizer_struct, state, params]) def _get_compiled_optimizer(self): # setup optimizer @@ -690,10 +690,14 @@ def _gen_llvm_execute_derivative_func(self, ctx, builder, state, params, arg_in) ctx.int32_ty(0), ctx.int32_ty(self._idx)]) - f_params_ptr = pnlvm.helpers.get_param_ptr(builder, self._mechanism, mech_params, "function") + f_params, f_state = ctx.get_param_or_state_ptr(builder, + self._mechanism, + "function", + param_struct_ptr=mech_params, + state_struct_ptr=mech_state) + f_params, builder = 
self._mechanism._gen_llvm_param_ports_for_obj( - self._mechanism.function, f_params_ptr, ctx, builder, mech_params, mech_state, mech_input) - f_state = pnlvm.helpers.get_state_ptr(builder, self._mechanism, mech_state, "function") + self._mechanism.function, f_params, ctx, builder, mech_params, mech_state, mech_input) output, _ = self._mechanism._gen_llvm_invoke_function(ctx, builder, self._mechanism.function, f_params, f_state, mech_input, None, @@ -770,21 +774,31 @@ def log_matrix(self): self._projection.parameters.matrix._set(detached_matrix, context=self._context) self._projection.parameter_ports['matrix'].parameters.value._set(detached_matrix, context=self._context) - def _extract_llvm_matrix(self, ctx, builder, params): - proj_params = builder.gep(params, [ctx.int32_ty(0), - ctx.int32_ty(1), - ctx.int32_ty(self._idx)]) + def _extract_llvm_matrix(self, ctx, builder, state, params): + proj_params = builder.gep(params, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(self._idx)]) + proj_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(self._idx)]) dim_x, dim_y = self.matrix.detach().numpy().shape - proj_func = pnlvm.helpers.get_param_ptr(builder, self._projection, proj_params, "function") - proj_matrix = pnlvm.helpers.get_param_ptr(builder, self._projection.function, proj_func, "matrix") + + func_p, func_s = ctx.get_param_or_state_ptr(builder, + self._projection, + self._projection.parameters.function, + param_struct_ptr=proj_params, + state_struct_ptr=proj_state) + + proj_matrix = ctx.get_param_or_state_ptr(builder, + self._projection.function, + self._projection.function.parameters.matrix, + param_struct_ptr=func_p, + state_struct_ptr=func_s) + proj_matrix = builder.bitcast(proj_matrix, pnlvm.ir.types.ArrayType( pnlvm.ir.types.ArrayType(ctx.float_ty, dim_y), dim_x).as_pointer()) return proj_matrix def _gen_llvm_execute(self, ctx, builder, state, params, data): - proj_matrix = self._extract_llvm_matrix(ctx, builder, params) + proj_matrix = self._extract_llvm_matrix(ctx, builder, state, params) input_vec = builder.gep(data, [ctx.int32_ty(0), ctx.int32_ty(0), diff --git a/requirements.txt b/requirements.txt index 2106c31f7b4..d4c5ad265a1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ autograd<1.7 -beartype<0.16.0 -dill<0.3.8 +beartype<0.18.0 +dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 -grpcio<1.60.0 +grpcio<1.61.0 leabra-psyneulink<0.3.3 llvmlite<0.42 matplotlib<3.7.3 @@ -13,10 +13,11 @@ networkx<3.3 numpy>=1.21.0, <1.24.5 optuna<3.4.0 packaging<24.0 -pandas<2.1.5 -pillow<10.2.0 +pandas<2.2.1 +pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 +scipy<1.12 toposort<1.11 torch>=1.10.0, <2.2.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' diff --git a/setup.cfg b/setup.cfg index 36b832f292d..141ba999200 100644 --- a/setup.cfg +++ b/setup.cfg @@ -67,9 +67,9 @@ required_plugins = pytest-benchmark pytest-cov pytest-helpers-namespace pytest-p xfail_strict = True filterwarnings = + error::SyntaxWarning error:Creating an ndarray from ragged nested sequences \(which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes\) is deprecated.*:numpy.VisibleDeprecationWarning error:Invalid escape sequence - ignore:Multiple ParameterPorts:UserWarning [pycodestyle] # for code explanation see 
https://pep8.readthedocs.io/en/latest/intro.html#error-codes diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 40ce2f5beea..29c1af6658d 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -1185,19 +1185,22 @@ def test_add_processing_pathway_subset_duplicate_warning(self, verbosity): C = TransferMechanism() comp = Composition() - comp.add_linear_processing_pathway(pathway=[A,B,C]) + comp.add_linear_processing_pathway(pathway=[A, B, C]) + comp.verbosePref = PreferenceEntry(verbosity, PreferenceLevel.INSTANCE) + + with warnings.catch_warnings(record=True) as msgs: + comp.add_linear_processing_pathway(pathway=[A, B]) - # Test for warning if verbosePref is set to True if verbosity: - regexp = f"Pathway specified in 'pathway' arg for add_linear_processing_pathway method of '{comp.name}' " \ - f"has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." - with pytest.warns(UserWarning, match=regexp): - comp.verbosePref = PreferenceEntry(True, PreferenceLevel.INSTANCE) - comp.add_linear_processing_pathway(pathway=[A,B]) + # Test for warning if verbosePref is set to True + warning = f"Pathway specified in 'pathway' arg for add_linear_processing_pathway method of '{comp.name}' " \ + f"has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." + + # The above issues 2 warnings, but we only test for one of them here + assert any(str(m.message) == warning for m in msgs), list(str(m.message) for m in msgs) else: - # Test for suppression of warning if verbosePref not set - with pytest.warns(None): - comp.add_linear_processing_pathway(pathway=[A,B]) + # Test for suppression of warning if verbosePref is not set + assert len(msgs) == 0 def test_add_backpropagation_pathway_exact_duplicate_warning(self): A = TransferMechanism() @@ -1230,19 +1233,24 @@ def test_add_backpropagation_pathway_contiguous_subset_duplicate_warning(self, v B = TransferMechanism() C = TransferMechanism() comp = Composition() - comp.add_backpropagation_learning_pathway(pathway=[A,B,C]) + comp.add_backpropagation_learning_pathway(pathway=[A, B, C]) + + comp.verbosePref = PreferenceEntry(verbosity, PreferenceLevel.INSTANCE) + + with warnings.catch_warnings(record=True) as msgs: + comp.add_backpropagation_learning_pathway(pathway=[A, B]) - # Test for warning if verbosePref is set to True if verbosity: - regexp = f"Pathway specified in 'pathway' arg for add_backpropagation_learning_pathway method of '{comp.name}'" \ - f" has a subset of nodes in a Pathway already in '{comp.name}':.*; the latter will be used." - with pytest.warns(UserWarning, match=regexp): - comp.verbosePref = PreferenceEntry(True, PreferenceLevel.INSTANCE) - comp.add_backpropagation_learning_pathway(pathway=[A,B]) + # Test for warning if verbosePref is set to True + warning = f"Pathway specified in 'pathway' arg for add_backpropagation_learning_pathway method of '{comp.name}'" \ + f" has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." 
+ + # The above issues 2 warnings, but we only test for one of them here + assert any(str(m.message) == warning for m in msgs), list(str(m.message) for m in msgs) else: # Test for suppression of warning if verbosePref is not set - with pytest.warns(None): - comp.add_backpropagation_learning_pathway(pathway=[A,B]) + assert len(msgs) == 0 + def test_add_processing_pathway_non_contiguous_subset_is_OK(self): A = TransferMechanism() @@ -3833,7 +3841,10 @@ def test_execute_no_inputs(self, mode): inner_comp = Composition(pathways=[m_inner]) m_outer = ProcessingMechanism(size=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) - result = outer_comp.run(execution_mode=mode) + + with pytest.warns(UserWarning, match="No inputs provided in call"): + result = outer_comp.run(execution_mode=mode) + np.testing.assert_allclose(result, [[0.0, 0.0]]) @pytest.mark.composition @@ -3842,7 +3853,10 @@ def test_run_no_inputs(self, comp_mode): inner_comp = Composition(pathways=[m_inner]) m_outer = ProcessingMechanism(size=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) - result = outer_comp.run(execution_mode=comp_mode) + + with pytest.warns(UserWarning, match="No inputs provided in call"): + result = outer_comp.run(execution_mode=comp_mode) + np.testing.assert_allclose(result, [[0.0, 0.0]]) def test_lpp_invalid_matrix_keyword(self): @@ -4922,7 +4936,7 @@ def test_invalid_projection_deletion_when_nesting_comps(self): allocation_samples=pnl.SampleSpec(start=1.0, stop=5.0, num=5))]) ) assert not ocomp._check_for_existing_projections(sender=ib, receiver=ocomp_objective_mechanism) - return ocomp + # # Does not work yet due to initialize_cycle_values bug that causes first recurrent projection to pass different values # # to TranfserMechanism version vs Logistic fn + AdaptiveIntegrator fn version # def test_recurrent_transfer_mechanism_composition(self): diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index bda69ad4353..0533cd934c2 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1,5 +1,5 @@ +import contextlib import re - import numpy as np import pytest @@ -3587,27 +3587,27 @@ def test_model_based_num_estimates(self, num_estimates, rand_var): intensity_cost_function=pnl.Linear(slope=0.)) objective_mech = pnl.ObjectiveMechanism(monitor=[B]) - warning_type = None + warning_msg = f"'OptimizationControlMechanism-0' has 'num_estimates = {num_estimates}' specified, " \ + f"but its 'agent_rep' \\('comp'\\) has no random variables: " \ + f"'RANDOMIZATION_CONTROL_SIGNAL' will not be created, and num_estimates set to None." 
+ if num_estimates and not rand_var: - warning_type = UserWarning - warning_msg = f'"\'OptimizationControlMechanism-0\' has \'num_estimates = {num_estimates}\' specified, ' \ - f'but its \'agent_rep\' (\'comp\') has no random variables: ' \ - f'\'RANDOMIZATION_CONTROL_SIGNAL\' will not be created, and num_estimates set to None."' - with pytest.warns(warning_type) as warnings: + warning_context = pytest.warns(UserWarning, match=warning_msg) + else: + warning_context = contextlib.nullcontext() + + with warning_context: ocm = pnl.OptimizationControlMechanism(agent_rep=comp, state_features=[A.input_port], objective_mechanism=objective_mech, function=pnl.GridSearch(), num_estimates=num_estimates, control_signals=[control_signal]) - if warning_type: - assert any(warning_msg == repr(w.message.args[0]) for w in warnings) comp.add_controller(ocm) inputs = {A: [[[1.0]]]} - comp.run(inputs=inputs, - num_trials=2) + comp.run(inputs=inputs, num_trials=2) if not num_estimates or not rand_var: assert pnl.RANDOMIZATION_CONTROL_SIGNAL not in comp.controller.control_signals # Confirm no estimates @@ -3710,22 +3710,17 @@ def test_grid_search_random_selection(self, comp_mode, benchmark): inputs = {A: [[[1.0]]]} - comp.run(inputs=inputs, num_trials=10, context='outer_comp', execution_mode=comp_mode) - np.testing.assert_allclose(comp.results, [[[0.7310585786300049]], [[0.999999694097773]], [[0.999999694097773]], [[0.9999999979388463]], [[0.9999999979388463]], [[0.999999694097773]], [[0.9999999979388463]], [[0.999999999986112]], [[0.999999694097773]], [[0.9999999999999993]]]) + benchmark(comp.run, inputs=inputs, num_trials=10, context='outer_comp', execution_mode=comp_mode) + np.testing.assert_allclose(comp.results[:10], + [[[0.7310585786300049]], [[0.999999694097773]], [[0.999999694097773]], [[0.9999999979388463]], [[0.9999999979388463]], + [[0.999999694097773]], [[0.9999999979388463]], [[0.999999999986112]], [[0.999999694097773]], [[0.9999999999999993]]]) # control signal value (mod slope) is chosen randomly from all of the control signal values # that correspond to a net outcome of 1 if comp_mode is pnl.ExecutionMode.Python: log_arr = A.log.nparray_dictionary() np.testing.assert_allclose([[1.], [15.], [15.], [20.], [20.], [15.], [20.], [25.], [15.], [35.]], - log_arr['outer_comp']['mod_slope']) - - if benchmark.enabled: - # Disable logging for the benchmark run - A.log.set_log_conditions(items="mod_slope", log_condition=LogCondition.OFF) - A.log.clear_entries() - benchmark(comp.run, inputs=inputs, num_trials=10, context='bench_outer_comp', execution_mode=comp_mode) - assert len(A.log.get_logged_entries()) == 0 + log_arr['outer_comp']['mod_slope'][:10]) def test_input_CIM_assignment(self, comp_mode): diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index f46b7472c2e..69c48556e3b 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -12,10 +12,7 @@ from psyneulink.core.globals.keywords import Loss # from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import SSE, MSE, L0 -@pytest.mark.pytorch -@pytest.mark.composition -@pytest.fixture -def xor_network(): +def xor_network(comp_type, comp_learning_rate, pathway_learning_rate): """Create simple sample network for testing learning specifications Returns a function that takes a Composition type and learning_rate specifications and returns an instantiated Composition and its components @@ -36,47 +33,44 @@ def xor_network(): matrix=np.full((10,1), 0.1), 
sender=hidden_layer, receiver=output_layer) - inputs = np.array([[0, 0],[0, 1],[1, 0],[1, 1]]) - targets = np.array([[0],[1],[1],[0]]) - def _get_comp_type(comp_type, comp_learning_rate, pathway_learning_rate): - if comp_type == 'composition': - xor = Composition(learning_rate=comp_learning_rate) - # Note: uses Projections specified above by inference - pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer], - learning_rate=pathway_learning_rate) - target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM] - elif comp_type == 'autodiff': - # FIX: the format commented out below doesn't work for LLVM: - # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer]) - # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts]) - xor = pnl.AutodiffComposition() - xor.add_node(input_layer) - xor.add_node(hidden_layer) - xor.add_node(output_layer) - xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer) - xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer) - target_mechanism = None - else: - assert False, f"Bad composition type parameter passed to xor_net fixture" - return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets, - return _get_comp_type + inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + targets = np.array([[0], [1], [1], [0]]) + + if comp_type == 'composition': + xor = Composition(learning_rate=comp_learning_rate) + # Note: uses Projections specified above by inference + pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer], + learning_rate=pathway_learning_rate) + target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM] + elif comp_type == 'autodiff': + # FIX: the format commented out below doesn't work for LLVM: + # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer]) + # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts]) + xor = pnl.AutodiffComposition() + xor.add_node(input_layer) + xor.add_node(hidden_layer) + xor.add_node(output_layer) + xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer) + xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer) + target_mechanism = None + else: + assert False, f"Bad composition type parameter passed to xor_net fixture" + return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets, class TestInputAndTargetSpecs: - @pytest.mark.pytorch - @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func'], - ids=['dict', 'func', 'gen', 'gen_func']) - @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.PyTorch, - pnl.ExecutionMode.LLVMRun, - pnl.ExecutionMode.Python], - ids=['PyTorch', 'LLVM', 'Python']) - @pytest.mark.parametrize('comp_type', ['composition', 'autodiff'], - ids=['composition', 'autodiff']) - def node_spec_types(self, xor_network, comp_type, input_type, exec_mode): + @pytest.mark.composition + @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func']) + @pytest.mark.parametrize('exec_mode', [pytest.param(pnl.ExecutionMode.PyTorch, marks=pytest.mark.pytorch), + pytest.param(pnl.ExecutionMode.LLVMRun, marks=pytest.mark.llvm), + pnl.ExecutionMode.Python]) + @pytest.mark.parametrize('comp_type', ['composition', + pytest.param('autodiff', marks=pytest.mark.pytorch)]) + def test_node_spec_types(self, comp_type, 
input_type, exec_mode): if comp_type == 'composition' and exec_mode != pnl.ExecutionMode.Python: - pytest.skip(f"Execution mode {exec_mode} not relevant for Composition") + pytest.skip(f"Execution mode {exec_mode} not relevant for Composition learn") comp, input_layer, hidden_layer, output_layer, target_mechanism, stims, targets =\ xor_network(comp_type, 0.001, None) @@ -113,12 +107,8 @@ def get_inputs_gen(): else: assert False, f"Unrecognized input_type: {input_type}" - expected_results = [[0.6341436044849351]] - if comp_type is 'composition': - results = comp.learn(inputs=inputs) - else: - results = comp.learn(inputs=inputs, execution_mode=exec_mode) - np.testing.assert_allclose(results, expected_results) + results = comp.learn(inputs=inputs, execution_mode=exec_mode) + np.testing.assert_allclose(results, [[0.6341436044849351]]) @pytest.mark.composition @pytest.mark.pytorch @@ -1904,7 +1894,7 @@ def test_matrix_spec_and_learning_rate(self): ('learning_mech', .01, .02, .03, .04, [[0.63458688]]), ] @pytest.mark.parametrize('spec_types', spec_types, ids=[x[0] for x in spec_types]) - def test_different_learning_rate_specs_for_comp(self, xor_network, spec_types): + def test_different_learning_rate_specs_for_comp(self, spec_types): learning_mech_learning_rate = spec_types[1] learning_pathway_learning_rate = spec_types[2] composition_learning_rate = spec_types[3] diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 0dcde7a5562..0fd45cb4bc3 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -128,8 +128,8 @@ def test_pec_run_input_formats(inputs_dict, error_msg): "opt_method, result", [ ("differential_evolution", [0.010363518438648106]), - (optuna.samplers.RandomSampler(), [0.01]), - (optuna.samplers.CmaEsSampler(), [0.01]), + (optuna.samplers.RandomSampler(seed=0), [0.01]), + (optuna.samplers.CmaEsSampler(seed=0), [0.01]), ], ids=["differential_evolultion", "optuna_random_sampler", "optuna_cmaes_sampler"], ) diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 2ccb11e032a..c4cc72707d5 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -444,7 +444,7 @@ def test_DictionaryMemory_without_assoc(self): def test_DictionaryMemory_with_duplicate_entry_in_initializer_warning(self): - regexp = r'Attempt to initialize memory of DictionaryMemory with an entry \([[1 2 3]' + regexp = r'Attempt to initialize memory of DictionaryMemory with an entry \(\[\[1 2 3\]' with pytest.warns(UserWarning, match=regexp): em = EpisodicMemoryMechanism( name='EPISODIC MEMORY MECH', @@ -1034,7 +1034,7 @@ def test_ContentAddressableMemory_without_initializer_and_diff_field_sizes(self) def test_ContentAddressableMemory_with_duplicate_entry_in_initializer_warning(self): - regexp = r'Attempt to initialize memory of ContentAddressableMemory with an entry \([[1 2 3]' + regexp = r'Attempt to initialize memory of ContentAddressableMemory with an entry \(\[\[1 2 3\]' with pytest.warns(UserWarning, match=regexp): c = ContentAddressableMemory( initializer=np.array([[[1,2,3], [4,5,6]], diff --git a/tests/mdf/model_backprop.py b/tests/mdf/model_backprop.py index 2639a433c42..d21ff428e35 100644 --- a/tests/mdf/model_backprop.py +++ b/tests/mdf/model_backprop.py @@ -1,10 +1,10 @@ import psyneulink as pnl -a = pnl.TransferMechanism() -b = pnl.TransferMechanism() -c = pnl.TransferMechanism() +A = 
pnl.TransferMechanism(name='A') +B = pnl.TransferMechanism(name='B') +C = pnl.TransferMechanism(name='C') -p = pnl.Pathway(pathway=[a, b, c]) +p = pnl.Pathway(pathway=[A, B, C]) -comp = pnl.Composition() +comp = pnl.Composition(name='comp') comp.add_backpropagation_learning_pathway(pathway=p) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index a8ae9b7ddde..e66df63ae60 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -1,6 +1,7 @@ import copy import numpy as np import os +import sys import psyneulink as pnl import pytest @@ -35,7 +36,7 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): } -json_results_parametrization = [ +pnl_mdf_results_parametrization = [ ('model_basic.py', 'comp', '{A: 1}', True), ('model_basic.py', 'comp', '{A: 1}', False), ('model_basic_non_identity.py', 'comp', '{A: 1}', True), @@ -72,65 +73,148 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): str(stroop_stimuli).replace("'", ''), False ), - ('model_backprop.py', 'comp', '{a: [1, 2, 3]}', False), + ('model_backprop.py', 'comp', '{A: [1, 2, 3]}', False), ] +def get_mdf_output_file(orig_filename, tmp_path, format='json'): + """ + Returns: + tuple(pathlib.Path, str, str): + - a pytest tmp_path temp file using **orig_filename** and + **format** + - the full path to the temp file + - the full path to the temp file formatted so that it can be + used in an exec/eval string + """ + mdf_file = tmp_path / orig_filename.replace('.py', f'.{format}') + mdf_fname = str(mdf_file.absolute()) + + # need to escape backslash to use a filename in exec on windows + if sys.platform.startswith('win'): + mdf_exec_fname = mdf_fname.replace('\\', '\\\\') + else: + mdf_exec_fname = mdf_fname + + return mdf_file, mdf_fname, mdf_exec_fname + + +def read_defined_model_script(filename): + filename = os.path.join(os.path.dirname(__file__), filename) + + with open(filename, 'r') as orig_file: + model_input = orig_file.read() + + return model_input + + +def get_loaded_model_state(model_input: str): + _globals = copy.copy(globals()) + _locals = copy.copy(locals()) + + exec(model_input, _globals, _locals) + + return _globals, _locals + + +def run_compositions_in_state( + composition_input_strs, _globals, _locals, extra_run_args_str='' +): + results = {} + + for comp_name, inputs in composition_input_strs.items(): + exec(f'{comp_name}.run(inputs={inputs}, {extra_run_args_str})', _globals, _locals) + results[comp_name] = eval(f'{comp_name}.results', _globals, _locals) + + return results, _globals, _locals + + +def get_model_results_and_state( + model_input: str, composition_input_strs, extra_run_args_str='' +): + _globals, _locals = get_loaded_model_state(model_input) + return run_compositions_in_state( + composition_input_strs, _globals, _locals, extra_run_args_str + ) + + +def assert_result_equality(orig_results, new_results): + # compositions + assert orig_results.keys() == new_results.keys() + + for comp_name in orig_results: + np.testing.assert_allclose( + orig_results[comp_name], + new_results[comp_name], + err_msg=f"Results for composition '{comp_name}' are not equal:" + ) + + @pytest.mark.parametrize( 'filename, composition_name, input_dict_str, simple_edge_format', - json_results_parametrization + pnl_mdf_results_parametrization ) -def test_json_results_equivalence( +def test_get_mdf_serialized_results_equivalence_pnl_only( filename, composition_name, input_dict_str, simple_edge_format, ): + comp_inputs = {composition_name: input_dict_str} + # Get python script from file and execute - filename = 
os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - exec(f'{composition_name}.run(inputs={input_dict_str})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) - # Generate python script from JSON summary of composition and execute - json_summary = pnl.generate_json(eval(f'{composition_name}'), simple_edge_format=simple_edge_format) - exec(pnl.generate_script_from_json(json_summary)) - exec(f'{composition_name}.run(inputs={input_dict_str})') - new_results = eval(f'{composition_name}.results') - assert pnl.safe_equals(orig_results, new_results) + # Generate python script from MDF serialization of composition and execute + mdf_data = pnl.get_mdf_serialized( + eval(f'{composition_name}', orig_globals, orig_locals), + simple_edge_format=simple_edge_format + ) + new_script = pnl.generate_script_from_mdf(mdf_data) + new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) + + assert_result_equality(orig_results, new_results) @pytest.mark.parametrize( 'filename, composition_name, input_dict_str, simple_edge_format', - json_results_parametrization + pnl_mdf_results_parametrization ) -def test_write_json_file( +def test_write_mdf_file_results_equivalence_pnl_only( filename, composition_name, input_dict_str, simple_edge_format, + tmp_path, ): + comp_inputs = {composition_name: input_dict_str} + # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - exec(f'{composition_name}.run(inputs={input_dict_str})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py','.json') - exec(f'pnl.write_json_file({composition_name}, json_filename, simple_edge_format=simple_edge_format)') - exec(pnl.generate_script_from_json(json_filename)) - # exec(f'{composition_name}.run(inputs={input_dict_str})') - exec(f'pnl.get_compositions()[0].run(inputs={input_dict_str})') - final_results = eval(f'{composition_name}.results') - assert pnl.safe_equals(orig_results, final_results) + # Save MDF serialization of Composition to file and read back in. 
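+    # (get_mdf_output_file also returns an exec-safe copy of the path with
+    # backslashes escaped, since the filename below is interpolated into an
+    # exec'd statement; see the helper defined above.)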
+ _, mdf_fname, mdf_exec_fname = get_mdf_output_file(filename, tmp_path) + exec( + f'pnl.write_mdf_file({composition_name}, "{mdf_exec_fname}", simple_edge_format={simple_edge_format})', + orig_globals, + orig_locals, + ) + + new_script = pnl.generate_script_from_mdf(mdf_fname) + new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) + + assert_result_equality(orig_results, new_results) @pytest.mark.parametrize( @@ -144,41 +228,62 @@ def test_write_json_file( ('model_with_two_disjoint_comps.py', {'comp': '{A: 1}', 'comp2': '{C: 1}'}), ] ) -def test_write_json_file_multiple_comps( +def test_write_mdf_file_results_equivalence_pnl_only_multiple_comps( filename, input_dict_strs, + tmp_path, ): - orig_results = {} - # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - - for composition_name in input_dict_strs: - exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})') - orig_results[composition_name] = eval(f'{composition_name}.results') - + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, input_dict_strs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py', '.json') + # Save MDF serialization of Composition to file and read back in. + _, mdf_fname, mdf_exec_fname = get_mdf_output_file(filename, tmp_path) + exec( + f'pnl.write_mdf_file([{",".join(input_dict_strs)}], "{mdf_exec_fname}")', + orig_globals, + orig_locals + ) - exec(f'pnl.write_json_file([{",".join(input_dict_strs)}], json_filename)') - exec(pnl.generate_script_from_json(json_filename)) + new_script = pnl.generate_script_from_mdf(mdf_fname) + new_results, _, _ = get_model_results_and_state(new_script, input_dict_strs) - for composition_name in input_dict_strs: - exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})') - final_results = eval(f'{composition_name}.results') - assert orig_results[composition_name] == final_results, f'{composition_name}:' + assert_result_equality(orig_results, new_results) -def _get_mdf_model_results(evaluable_graph): - return [ - [eo.curr_value for _, eo in evaluable_graph.enodes[node.id].evaluable_outputs.items()] - for node in evaluable_graph.scheduler.consideration_queue[-1] - ] +def _get_mdf_model_results(evaluable_graph, composition=None): + """ + Returns psyneulink-style output for **evaluable_graph**, optionally + casting outputs to their equivalent node's shape in **composition** + """ + if composition is not None: + node_output_shapes = { + # NOTE: would use defaults.value here, but it doesn't always + # match the shape of value (specifically here, + # FitzHughNagumoIntegrator EULER) + pnl.parse_valid_identifier(node.name): node.value.shape + for node in composition.get_nodes_by_role(pnl.NodeRole.OUTPUT) + } + else: + node_output_shapes = {} + + res = [] + for node in evaluable_graph.scheduler.consideration_queue[-1]: + next_res_elem = [ + eo.curr_value for eo in evaluable_graph.enodes[node.id].evaluable_outputs.values() + ] + try: + next_res_elem = np.reshape(next_res_elem, node_output_shapes[node.id]) + except KeyError: + pass + + res.append(next_res_elem) + + return pnl.convert_to_np_array(res) # These runtime_params are necessary because noise seeding is not @@ -226,27 +331,28 @@ def 
_get_mdf_model_results(evaluable_graph): ('model_integrators.py', 'comp', {'A': 1.0}, False, integrators_runtime_params), ] ) -def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args): +def test_mdf_pnl_results_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args, tmp_path): from modeci_mdf.utils import load_mdf import modeci_mdf.execution_engine as ee + comp_inputs = {composition_name: input_dict} + # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - inputs_str = str(input_dict).replace("'", '') - exec(f'{composition_name}.run(inputs={inputs_str}, {run_args})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs, run_args + ) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py', '.json') - pnl.write_json_file(eval(composition_name), json_filename, simple_edge_format=simple_edge_format) + # Save MDF serialization of Composition to file and read back in. + _, mdf_fname, _ = get_mdf_output_file(filename, tmp_path) + composition = eval(composition_name, orig_globals, orig_locals) + pnl.write_mdf_file(composition, mdf_fname, simple_edge_format=simple_edge_format) - m = load_mdf(json_filename) + m = load_mdf(mdf_fname) eg = ee.EvaluableGraph(m.graphs[0], verbose=True) eg.evaluate(initializer={f'{node}_InputPort_0': i for node, i in input_dict.items()}) - assert pnl.safe_equals(orig_results, _get_mdf_model_results(eg)) + assert_result_equality(orig_results, {composition_name: _get_mdf_model_results(eg, composition)}) ddi_termination_conds = [ @@ -293,7 +399,7 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for *individual_functions_fhn_test_data, ] ) -def test_mdf_equivalence_individual_functions(mech_type, function, runtime_params, trial_termination_cond): +def test_mdf_pnl_results_equivalence_individual_functions(mech_type, function, runtime_params, trial_termination_cond): import modeci_mdf.execution_engine as ee A = mech_type(name='A', function=copy.deepcopy(function)) @@ -313,21 +419,22 @@ def test_mdf_equivalence_individual_functions(mech_type, function, runtime_param eg = ee.EvaluableGraph(model.graphs[0], verbose=True) eg.evaluate(initializer={'A_InputPort_0': 1.0}) - assert pnl.safe_equals(comp.results, _get_mdf_model_results(eg)) + np.testing.assert_array_equal(comp.results, _get_mdf_model_results(eg, comp)) -@pytest.mark.parametrize('filename', ['model_basic.py']) +@pytest.mark.parametrize( + 'filename, composition_name', + [ + ('model_basic.py', 'comp'), + ] +) @pytest.mark.parametrize('fmt', ['json', 'yml']) -def test_generate_script_from_mdf(filename, fmt): - filename = os.path.join(os.path.dirname(__file__), filename) - outfi = filename.replace('.py', f'.{fmt}') - - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - serialized = eval(f'pnl.get_mdf_serialized(comp, fmt="{fmt}")') +def test_generate_script_from_mdf(filename, composition_name, fmt, tmp_path): + orig_file = read_defined_model_script(filename) + exec(orig_file) + serialized = eval(f'pnl.get_mdf_serialized({composition_name}, fmt="{fmt}")') - with open(outfi, 'w') as f: - f.write(serialized) + mdf_file, mdf_fname, _ = get_mdf_output_file(filename, tmp_path, fmt) + 
mdf_file.write_text(serialized) - with open(outfi, 'r') as f: - assert pnl.generate_script_from_mdf(f.read()) == pnl.generate_script_from_mdf(outfi) + assert pnl.generate_script_from_mdf(mdf_file.read_text()) == pnl.generate_script_from_mdf(mdf_fname) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index c9e463a4bf2..e3f306b0f81 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -77,7 +77,7 @@ def test_valid(self): # reset only decision variable D.function.initializer = 1.0 - D.function.non_decision_time = 0.0 + D.function.non_decision_time.base = 0.0 D.reset() np.testing.assert_allclose(D.function.value[0], 1.0) np.testing.assert_allclose(D.function.parameters.previous_value.get(), 1.0) diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py index 7374f1e0679..9996dca42d6 100644 --- a/tests/mechanisms/test_lca.py +++ b/tests/mechanisms/test_lca.py @@ -185,12 +185,14 @@ def test_LCAMechanism_threshold_with_convergence(self, benchmark, comp_mode): comp = Composition() comp.add_node(lca) - result = comp.run(inputs={lca:[0,1,2]}, execution_mode=comp_mode) - np.testing.assert_allclose(result, [[0.19153799, 0.5, 0.80846201]]) + def func(*args, **kwargs): + res = comp.run(*args, **kwargs) + return (res, lca.num_executions_before_finished) + + results = benchmark(func, inputs={lca:[0,1,2]}, execution_mode=comp_mode) + np.testing.assert_allclose(results[0], [[0.19153799, 0.5, 0.80846201]]) if comp_mode is pnl.ExecutionMode.Python: - assert lca.num_executions_before_finished == 18 - if benchmark.enabled: - benchmark(comp.run, inputs={lca:[0,1,2]}, execution_mode=comp_mode) + assert results[1] == 18 @pytest.mark.composition @pytest.mark.lca_mechanism diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py index fd403991f6e..2835140936f 100644 --- a/tests/mechanisms/test_mechanisms.py +++ b/tests/mechanisms/test_mechanisms.py @@ -50,7 +50,7 @@ def test_noise_assignment_equivalence(self, noise): t2 = pnl.TransferMechanism(name='t2', size=2) t2.integrator_function.parameters.noise.set(noise()) - t1.integrator_function.noise.seed = 0 + t1.integrator_function.noise.seed.base = 0 t2.integrator_function.noise.base.seed = 0 for _ in range(5): diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 9fc8a95c4b7..d4fa9ff75bd 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -820,7 +820,7 @@ def test_recurrent_mech_with_learning(self): ) # Test that all of these are the same: np.testing.assert_allclose( - R.recurrent_projection.mod_matrix, + R.recurrent_projection.matrix.modulated, [ [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], @@ -880,8 +880,8 @@ def test_recurrent_mech_change_learning_rate(self): [1.1, 0., 1.1, 1.1], [1.1, 1.1, 0., 1.1], [1.1, 1.1, 1.1, 0.]] - np.testing.assert_allclose(R.recurrent_projection.mod_matrix, matrix_1) - print(R.recurrent_projection.mod_matrix) + np.testing.assert_allclose(R.recurrent_projection.get_mod_matrix(c), matrix_1) + print(R.recurrent_projection.get_mod_matrix(c)) R.learning_rate.base = 0.9 assert R.learning_rate.base == 0.9 @@ -892,8 +892,8 @@ def test_recurrent_mech_change_learning_rate(self): [1.911125, 0., 1.911125, 1.911125], [1.911125, 1.911125, 0., 1.911125], [1.911125, 1.911125, 1.911125, 0.]] - # np.testing.assert_allclose(R.recurrent_projection.mod_matrix, matrix_2) - 
print(R.recurrent_projection.mod_matrix) + # np.testing.assert_allclose(R.recurrent_projection.get_mod_matrix(c), matrix_2) + print(R.recurrent_projection.get_mod_matrix(c)) def test_learning_of_orthognal_inputs(self): size=4 diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 85814d3388e..1873e5dcae7 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -299,33 +299,19 @@ def test_transfer_mech_exponential_noise(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_uniform_to_normal_noise(self): - try: - import scipy - except ModuleNotFoundError: - with pytest.raises(FunctionError) as error_text: - T = TransferMechanism( - name='T', - default_variable=[0, 0, 0, 0], - function=Linear(), - noise=UniformToNormalDist(), - integration_rate=1.0 - ) - assert "The UniformToNormalDist function requires the SciPy package." in str(error_text.value) - else: - T = TransferMechanism( - name='T', - default_variable=[0, 0, 0, 0], - function=Linear(), - noise=UniformToNormalDist(), - integration_rate=1.0 - ) - # This is equivalent to - # T.noise.base.parameters.random_state.get(None).seed([22]) - T.noise.parameters.seed.set(22, None) - val = T.execute([0, 0, 0, 0]) - np.testing.assert_allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]]) - + T = TransferMechanism( + name='T', + default_variable=[0, 0, 0, 0], + function=Linear(), + noise=UniformToNormalDist(), + integration_rate=1.0 + ) + # This is equivalent to + # T.noise.base.parameters.random_state.get(None).seed([22]) + T.noise.parameters.seed.set(22, None) + val = T.execute([0, 0, 0, 0]) + np.testing.assert_allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]]) @pytest.mark.mechanism @pytest.mark.transfer_mechanism diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py index b1602f131fe..adee0838155 100644 --- a/tests/projections/test_projection_specifications.py +++ b/tests/projections/test_projection_specifications.py @@ -462,25 +462,23 @@ def test_formats_for_gating_specification_of_input_and_output_ports(self, input_ # assert 'Primary OutputPort of ControlMechanism-1 (ControlSignal-0) ' \ # 'cannot be used as a sender of a Projection to OutputPort of T2' in error_text.value.args[0] + @pytest.mark.filterwarnings("error:elementwise comparison failed; returning scalar instead:") def test_no_warning_when_matrix_specified(self): - with pytest.warns(None) as w: - c = pnl.Composition() - m0 = pnl.ProcessingMechanism( - default_variable=[0, 0, 0, 0] - ) - p0 = pnl.MappingProjection( - matrix=[[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]] - ) - m1 = pnl.TransferMechanism( - default_variable=[0, 0, 0, 0] - ) - c.add_linear_processing_pathway([m0, p0, m1]) - for warn in w: - assert 'elementwise comparison failed; returning scalar instead' not in warn.message.args[0] + c = pnl.Composition() + m0 = pnl.ProcessingMechanism( + default_variable=[0, 0, 0, 0] + ) + p0 = pnl.MappingProjection( + matrix=[[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + ) + m1 = pnl.TransferMechanism( + default_variable=[0, 0, 0, 0] + ) + c.add_linear_processing_pathway([m0, p0, m1]) # KDM: this is a good candidate for pytest.parametrize def test_masked_mapping_projection(self):
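
A few implementation notes with standalone sketches follow; the code in them is illustrative and is not part of the patch. First, the seed=0 arguments added to the optuna samplers in test_parameterestimationcomposition.py are what pin the samplers' proposals, and hence the hard-coded expected results, across runs. A minimal sketch of that effect (illustrative 1-D objective, plain optuna, no PsyNeuLink):

```python
import optuna

def objective(trial):
    # illustrative objective; the real tests fit a ParameterEstimationComposition
    return trial.suggest_float('x', 0.0, 1.0)

# Two studies driven by identically seeded samplers propose the same
# parameter values, so a hard-coded expected optimum stays stable.
s1 = optuna.create_study(sampler=optuna.samplers.RandomSampler(seed=0))
s2 = optuna.create_study(sampler=optuna.samplers.RandomSampler(seed=0))
s1.optimize(objective, n_trials=5)
s2.optimize(objective, n_trials=5)
assert [t.params for t in s1.trials] == [t.params for t in s2.trials]
```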
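
Second, the escaping fix in tests/functions/test_memory.py: an unescaped `[` opens a regex character class, so the old pattern matched a single character where the literal text `[[1 2 3]` was intended. A standalone illustration (warning text shortened):

```python
import re

msg = "Attempt to initialize memory of DictionaryMemory with an entry ([[1 2 3]"

# Old pattern: '[[1 2 3]' is the character class {'[', '1', ' ', '2', '3'},
# so it matches exactly one such character after the '('...
assert re.search(r'entry \([[1 2 3]', msg).group() == 'entry (['
# ...whereas the escaped pattern matches the intended literal text.
assert re.search(r'entry \(\[\[1 2 3\]', msg).group() == 'entry ([[1 2 3]'
```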
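
Third, the new test_mdf.py helpers thread one pair of `_globals`/`_locals` dicts through every `exec`/`eval` call. That is load-bearing: names defined by an exec'd model script live only in the namespace dicts handed to `exec`, so later `eval` calls see them only when given the same dicts. A minimal sketch (illustrative script text, no PsyNeuLink):

```python
# Counterpart of get_loaded_model_state()/run_compositions_in_state():
# the same namespace dicts must be threaded through exec and eval.
script = "x = [1, 2, 3]"

_globals, _locals = {}, {}
exec(script, _globals, _locals)

# A bare eval('sum(x)') in this frame would raise NameError; with the
# captured dicts, 'x' resolves and builtins come along via _globals.
assert eval('sum(x)', _globals, _locals) == 6
```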
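
Fourth, the reworked LCA test leans on pytest-benchmark returning the wrapped callable's return value, so bundling extra state (there, the execution count) into a tuple lets a single benchmarked call drive all assertions instead of a second, unchecked timing run. A sketch of the pattern with a stand-in workload:

```python
# Requires the pytest-benchmark plugin (the 'benchmark' fixture); the
# workload is a stand-in for comp.run plus num_executions_before_finished.
def test_benchmark_returns_wrapped_value(benchmark):
    calls = []

    def func(x):
        calls.append(x)            # benchmark may invoke this several times
        return x * 2, len(calls)   # bundle extra state with the result

    result, count_at_return = benchmark(func, 21)
    assert result == 42            # the benchmarked call's value is checked directly
    assert count_at_return >= 1
```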
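
Finally, test_no_warning_when_matrix_specified now uses a `filterwarnings` marker instead of the `pytest.warns(None)`-and-grep idiom that pytest 7 removed; the marker escalates only the suspect warning to an error, so the test fails exactly when that warning is emitted. A minimal sketch (illustrative warning message):

```python
import warnings
import pytest

@pytest.mark.filterwarnings("error:some suspect warning")
def test_fails_only_on_the_suspect_warning():
    # Unrelated warnings are still permitted under the marker...
    warnings.warn("something harmless", UserWarning)
    # ...but a warning whose message starts with "some suspect warning"
    # would be raised as an error here and fail the test.
```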