Skip to content

Commit

Permalink
chore: add python 3.11 support (#388)
Browse files Browse the repository at this point in the history
Signed-off-by: ThibaultFy <[email protected]>
  • Loading branch information
ThibaultFy authored Oct 9, 2023
1 parent 05a38a2 commit 99be3e2
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 24 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python.yml
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10"]
python-version: ["3.8", "3.9", "3.10", "3.11"]
name: Tests on Python ${{ matrix.python-version }}
steps:
- name: Set up python
Expand Down
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

### Added

- Support for Python 3.11 ([#388](https://github.com/Substra/substra/pull/388))

## [0.48.1](https://github.com/Substra/substra/releases/tag/0.48.1) - 2023-10-06

### Changed
Expand Down
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
keywords=["cli", "substra"],
packages=find_packages(exclude=["tests*"]),
Expand Down
46 changes: 23 additions & 23 deletions tests/data_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,11 +56,11 @@ def fake_data(self, n_samples=None):
@tools.register
def score(inputs, outputs, task_properties):
y_true = inputs['{InputIdentifiers.datasamples}'][1]
y_pred = _get_predictions(inputs['{InputIdentifiers.predictions}'])
y_true = inputs['{InputIdentifiers.datasamples.value}'][1]
y_pred = _get_predictions(inputs['{InputIdentifiers.predictions.value}'])
res = sum(y_pred) - sum(y_true)
print(f'metrics, y_true: {{y_true}}, y_pred: {{y_pred}}, result: {{res}}')
tools.save_performance(res, outputs['{OutputIdentifiers.performance}'])
tools.save_performance(res, outputs['{OutputIdentifiers.performance.value}'])
def _get_predictions(path):
with open(path) as f:
Expand All @@ -77,9 +77,9 @@ def _get_predictions(path):
@tools.register
def train(inputs, outputs, task_properties):
X = inputs['{InputIdentifiers.datasamples}'][0]
y = inputs['{InputIdentifiers.datasamples}'][1]
models_path = inputs.get('{InputIdentifiers.shared}', [])
X = inputs['{InputIdentifiers.datasamples.value}'][0]
y = inputs['{InputIdentifiers.datasamples.value}'][1]
models_path = inputs.get('{InputIdentifiers.shared.value}', [])
models = [_load_model(model_path) for model_path in models_path]
print(f'Train, get X: {{X}}, y: {{y}}, models: {{models}}')
Expand All @@ -95,17 +95,17 @@ def train(inputs, outputs, task_properties):
res = dict(value=avg + err)
print(f'Train, return {{res}}')
_save_model(res, outputs['{OutputIdentifiers.shared}'])
_save_model(res, outputs['{OutputIdentifiers.shared.value}'])
@tools.register
def predict(inputs, outputs, task_properties):
X = inputs['{InputIdentifiers.datasamples}'][0]
model = _load_model(inputs['{InputIdentifiers.shared}'])
X = inputs['{InputIdentifiers.datasamples.value}'][0]
model = _load_model(inputs['{InputIdentifiers.shared.value}'])
res = [x * model['value'] for x in X]
print(f'Predict, get X: {{X}}, model: {{model}}, return {{res}}')
_save_predictions(res, outputs['{OutputIdentifiers.predictions}'])
_save_predictions(res, outputs['{OutputIdentifiers.predictions.value}'])
def _load_model(path):
with open(path) as f:
Expand All @@ -129,18 +129,18 @@ def _save_predictions(y_pred, path):
@tools.register
def aggregate(inputs, outputs, task_properties):
models_path = inputs.get('{InputIdentifiers.shared}', [])
models_path = inputs.get('{InputIdentifiers.shared.value}', [])
models = [_load_model(model_path) for model_path in models_path]
print(f'Aggregate models: {{models}}')
values = [m['value'] for m in models]
avg = sum(values) / len(values)
res = dict(value=avg)
print(f'Aggregate result: {{res}}')
_save_model(res, outputs['{OutputIdentifiers.shared}'])
_save_model(res, outputs['{OutputIdentifiers.shared.value}'])
@tools.register
def predict(inputs, outputs, task_properties):
_save_predictions(0, outputs['{OutputIdentifiers.predictions}'])
_save_predictions(0, outputs['{OutputIdentifiers.predictions.value}'])
def _load_model(path):
with open(path) as f:
Expand All @@ -165,12 +165,12 @@ def _save_predictions(y_pred, path):
@tools.register
def train(inputs, outputs, task_properties):
X = inputs['{InputIdentifiers.datasamples}'][0]
y = inputs['{InputIdentifiers.datasamples}'][1]
head_model_path = inputs.get('{InputIdentifiers.local}')
X = inputs['{InputIdentifiers.datasamples.value}'][0]
y = inputs['{InputIdentifiers.datasamples.value}'][1]
head_model_path = inputs.get('{InputIdentifiers.local.value}')
head_model = _load_model(head_model_path) if head_model_path else None
trunk_model_path = inputs.get('{InputIdentifiers.shared}')
trunk_model_path = inputs.get('{InputIdentifiers.shared.value}')
trunk_model = _load_model(trunk_model_path) if trunk_model_path else None
print(f'Composite function train X: {{X}}, y: {{y}}, head_model: {{head_model}}, trunk_model: {{trunk_model}}')
Expand All @@ -192,21 +192,21 @@ def train(inputs, outputs, task_properties):
res = dict(value= res_head + err_head), dict(value= res_trunk + err_trunk)
print(f'Composite function train head, trunk result: {{res}}')
_save_model(res[0], outputs['{OutputIdentifiers.local}'])
_save_model(res[1], outputs['{OutputIdentifiers.shared}'])
_save_model(res[0], outputs['{OutputIdentifiers.local.value}'])
_save_model(res[1], outputs['{OutputIdentifiers.shared.value}'])
@tools.register
def predict(inputs, outputs, task_properties):
X = inputs['{InputIdentifiers.datasamples}'][0]
head_model = _load_model(inputs['{InputIdentifiers.local}'])
trunk_model = _load_model(inputs['{InputIdentifiers.shared}'])
X = inputs['{InputIdentifiers.datasamples.value}'][0]
head_model = _load_model(inputs['{InputIdentifiers.local.value}'])
trunk_model = _load_model(inputs['{InputIdentifiers.shared.value}'])
print(f'Composite function predict X: {{X}}, head_model: {{head_model}}, trunk_model: {{trunk_model}}')
ratio_sum = head_model['value'] + trunk_model['value']
res = [x * ratio_sum for x in X]
print(f'Composite function predict result: {{res}}')
_save_predictions(res, outputs['{OutputIdentifiers.predictions}'])
_save_predictions(res, outputs['{OutputIdentifiers.predictions.value}'])
def _load_model(path):
with open(path) as f:
Expand Down

0 comments on commit 99be3e2

Please sign in to comment.