Merge pull request #49 from MetaSys-LISBP/dev
Dev
llegregam committed Oct 25, 2023
2 parents cb34a7a + 2e6b1ec commit 577d684
Showing 5 changed files with 254 additions and 183 deletions.
3 changes: 3 additions & 0 deletions docs/models.rst
@@ -531,6 +531,9 @@ programmatic way: ::

This will return the calculated flux values and associated statistics.

.. note:: The test data and calculation parameters (e.g. standard deviations) defined in the test function must correspond to those expected for the new model.


To test the integration of the model into the GUI, copy the :file:`.py` file
into the :file:`models` folder of the PhysioFit directory. You can get the path to this folder by opening a Python
kernel in your dedicated environment and initializing an IoHandler ::
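The literal block that follows "initializing an IoHandler ::" is truncated in this diff. As a rough sketch of the idea (not the documented snippet itself; deriving the path from physiofit.__file__ is an assumption on my part), the models folder of an installed PhysioFit can be located like this:

from pathlib import Path

import physiofit
from physiofit.base.io import IoHandler

io_handler = IoHandler()  # the entry point the documentation asks you to initialize
models_dir = Path(physiofit.__file__).parent / "models"  # assumed location of the models folder
print(models_dir)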
7 changes: 5 additions & 2 deletions physiofit/base/io.py
@@ -314,7 +314,7 @@ def output_plots(self, fitter, export_path):
        except Exception:
            raise RuntimeError("Unknown error while generating output")

    def output_recap(self, export_path: str):
    def output_recap(self, export_path: str, galaxy=False):

        if not isinstance(self.multiple_experiments, list):
            raise TypeError(
@@ -335,7 +335,10 @@ def output_recap(self, export_path: str):
        final_df[["experiments", "parameter name"]] = final_df["index"].str.split(" ", expand=True)
        final_df.set_index(["experiments", "parameter name"], inplace=True)
        final_df.drop("index", axis=1, inplace=True)
        final_df.to_csv(f"{str(Path(export_path))}/summary.csv")
        if galaxy:
            final_df.to_csv(str(Path(export_path)))
        else:
            final_df.to_csv(f"{str(Path(export_path))}/summary.csv")


    def output_report(self, fitter, export_path: str | list = None):
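A short usage sketch of the output_recap change above: the default behaviour still writes summary.csv inside the export directory, while galaxy=True writes the summary to the exact path it is given, presumably because Galaxy supplies the full output file path. The column name, index value and paths below are placeholders, and the export directory is assumed to exist.

import pandas as pd
from physiofit.base.io import IoHandler

io = IoHandler()
# one per-experiment stats table, as run() would append (values are placeholders)
io.multiple_experiments = [pd.DataFrame({"optimal": [0.8]}, index=["exp1 growth_rate"])]
io.output_recap(export_path="results_folder")            # -> results_folder/summary.csv
io.output_recap(export_path="recap.csv", galaxy=True)    # -> recap.csv (Galaxy mode)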
171 changes: 118 additions & 53 deletions physiofit/ui/cli.py
@@ -14,6 +14,9 @@
import logging
from pathlib import Path
import sys
# import shutil

import pandas as pd

from physiofit.base.io import IoHandler, StandardDevs, ConfigParser

@@ -73,46 +76,106 @@ def args_parse():
"-oc", "--output_config", type=str,
help="Path to output the yaml config file"
)
parser.add_argument(
"-or", "--output_recap", type=str,
help="Path to output the summary"
)
parser.add_argument(
"-oz", "--output_zip", type=str,
help="Path to export zip file"
)

return parser
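As a standalone sketch (not the project's full parser, which has more arguments than shown in this hunk), the two new options behave like this:

import argparse

sketch = argparse.ArgumentParser(description="illustration of the two new options only")
sketch.add_argument("-or", "--output_recap", type=str, help="Path to output the summary")
sketch.add_argument("-oz", "--output_zip", type=str, help="Path to export zip file")

args = sketch.parse_args(["-or", "summary.csv", "-oz", "results.zip"])  # placeholder paths
print(args.output_recap, args.output_zip)  # summary.csv results.zip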

def run(data, args, logger, exp=None):

    io = IoHandler()
    logger.info(f"Input Data: \n{io.data}")
    io.home_path = Path(args.data).parent
    logger.info(f"Home path: {io.home_path}")
    logger.info(f"Reading configuration file at {args.config}")
    io.configparser = io.read_yaml(args.config)
    logger.info(f"Config File:\n{io.configparser}")
    model = io.select_model(io.configparser.model["model_name"], data)
    model.get_params()
    logger.info(f"Selected Model:\n{model}")
    model = io.configparser.update_model(model)
    logger.info(f"Updated Model: \n{model}")
    logger.info(f"Model Data: \n{model.data}")
    fitter = io.initialize_fitter(
        model.data,
        model=model,
        mc=io.configparser.mc,
        iterations=io.configparser.iterations,
        sd=io.configparser.sds,
        debug_mode=args.debug_mode
    )
    fitter.optimize()
    if fitter.mc:
        fitter.monte_carlo_analysis()
    fitter.khi2_test()
    if exp is not None:
        res_path = io.home_path / (io.home_path.name + "_res") / exp
    else:
        res_path = io.home_path / (io.home_path.name + "_res")
    if not res_path.is_dir():
        res_path.mkdir(parents=True)
    io.output_report(fitter, res_path)
    io.output_plots(fitter, res_path)
    io.output_pdf(fitter, res_path)
    io.figures = []
def run(data, args, logger, experiments):

    for exp in experiments:
        io = IoHandler()
        exp_data = data.loc[exp, :].sort_values("time").copy()
        logger.info(f"Input Data: \n{exp_data}")
        io.home_path = Path(args.data).parent
        logger.info(f"Home path: {io.home_path}")
        logger.info(f"Reading configuration file at {args.config}")
        io.configparser = io.read_yaml(args.config)
        logger.info(f"Config File:\n{io.configparser}")
        model = io.select_model(io.configparser.model["model_name"], exp_data)
        model.get_params()
        logger.info(f"Selected Model:\n{model}")
        model = io.configparser.update_model(model)
        logger.info(f"Updated Model: \n{model}")
        logger.info(f"Model Data: \n{model.data}")

        fitter = io.initialize_fitter(
            model.data,
            model=model,
            mc=io.configparser.mc,
            iterations=io.configparser.iterations,
            sd=io.configparser.sds,
            debug_mode=args.debug_mode
        )
        fitter.optimize()
        if fitter.mc:
            fitter.monte_carlo_analysis()
        fitter.khi2_test()
        df = pd.DataFrame.from_dict(
            fitter.parameter_stats,
            orient="columns"
        )
        df.index = [
            f"{exp} {param}" for param in fitter.model.parameters_to_estimate.keys()
        ]
        if not io.multiple_experiments:
            io.multiple_experiments = []
        io.multiple_experiments.append(df)
        if exp is not None:
            res_path = io.home_path / (io.home_path.name + "_res") / exp
        else:
            res_path = io.home_path / (io.home_path.name + "_res")
        logger.info(res_path)
        if not res_path.is_dir():
            res_path.mkdir(parents=True)
        logger.info(f"Results:\n{df}")
        io.output_report(fitter, str(res_path))
        io.output_plots(fitter, str(res_path))
        io.output_pdf(fitter, str(res_path))
        io.figures = []
    if args.output_zip:
        # logger.info(io.home_path)
        # logger.info(io.home_path / io.home_path.name / "_res")
        # logger.info(args.output_zip)
        dir = res_path.parents[0]
        # for line in dir.rglob("*"):
        # print(line)
        generate_zips(str(dir), args.output_zip, logger)

        # shutil.make_archive(
        #     args.output_zip,
        #     format='zip',
        #     root_dir=str(io.home_path / (io.home_path.name + "_res"))
        # )
    io.output_recap(export_path=args.output_recap, galaxy=True)
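A hypothetical driver for the reworked run() above (a sketch only: the Namespace fields are taken from the attributes used in the function, the paths are placeholders, and the tab-separated layout with an "experiments" column matches the checks in process()):

from argparse import Namespace
import logging

import pandas as pd

# assuming run() from physiofit.ui.cli is in scope and data.tsv / config.yaml exist
args = Namespace(
    data="data.tsv", config="config.yaml", debug_mode=False,
    output_zip="results.zip", output_recap="summary.csv",
)
logger = logging.getLogger("physiofit-cli")
data = pd.read_csv(args.data, sep="\t").set_index("experiments")
run(data, args, logger, experiments=list(data.index.unique()))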

def generate_zips(path_to_data_folder, output_path, logger):
    """Generate an output zip file containing the results

    Args:
        path_to_data_folder (str): path to the folder containing the directories & files to zip
        output_path (str): path to export the archive to
        logger (logging.Logger): main logger

    Returns:
        int: 0 on completion
    """
    from zipfile import ZipFile
    directory = Path(path_to_data_folder)
    output = Path(output_path)
    with ZipFile(str(output), mode="w") as archive:
        for file_path in directory.rglob("*"):
            archive.write(
                file_path,
                arcname=file_path.relative_to(directory)
            )
    return 0
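For example (a hypothetical call; the folder and archive names are placeholders), zipping a results folder produced by run():

import logging

logging.basicConfig(level=logging.INFO)
# "data_res" must already exist and contain the per-experiment result folders
generate_zips("data_res", "results.zip", logging.getLogger("physiofit-cli"))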

def generate_config(args, data, logger):

@@ -180,11 +243,17 @@ def process(args):
        raise ValueError(
            f"The data path is not correct. Please check and try again. Input data path: \n{args.data}"
        )
    # Ensure that the input file is a tsv
    if not path_to_data.suffix in [".tsv", ".txt"]:
        raise TypeError(
            f"The input data must be in tsv/txt format. Detected format: {path_to_data.suffix}"
        )
    # Ensure that the input file is a tsv if we are local
    if not args.galaxy:
        if not path_to_data.suffix in [".tsv", ".txt"]:
            raise TypeError(
                f"The input data must be in tsv/txt format. Detected format: {path_to_data.suffix}"
            )
    else:
        if not path_to_data.suffix == ".dat":
            raise TypeError(
                f"The input data must be in dat format (galaxy data format). Detected format: {path_to_data.suffix}"
            )

    # Read & check data
    data = IoHandler.read_data(str(path_to_data))
@@ -194,17 +263,13 @@
        generate_config(args, data, logger)

    # If configuration file is present we launch the optimization
    if "experiments" in data.columns:
        experiments = list(data["experiments"].unique())
        data = data.set_index("experiments")
        for exp in experiments:
            logger.info(f"Running optimization for {exp}")
            run_data = data.loc[exp, :].sort_values("time").copy()
            run(run_data, args, logger, exp)
    else:
        logger.info("Running optimization")
        run_data = data.sort_values("time")
        run(run_data, args, logger)
    experiments = list(data["experiments"].unique())
    data = data.set_index("experiments")
    for exp in experiments:
        logger.info(f"Running optimization for {exp}")
        run_data = data.loc[exp, :].sort_values("time").copy()
        run(run_data, args, logger, exp)
    run(data, args, logger, experiments)
    logger.info("Done!")
    sys.exit()

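To illustrate the multi-experiment input that the reworked process() now expects (a minimal sketch: the "experiments" and "time" columns come from the code above, while the measurement column "X" and its values are placeholders):

import pandas as pd

data = pd.DataFrame({
    "experiments": ["exp1", "exp1", "exp2", "exp2"],
    "time": [0.0, 1.0, 0.0, 1.0],
    "X": [0.05, 0.12, 0.06, 0.15],
})
experiments = list(data["experiments"].unique())
data = data.set_index("experiments")
for exp in experiments:
    run_data = data.loc[exp, :].sort_values("time").copy()
    print(exp, len(run_data))  # exp1 2, then exp2 2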
