diff --git a/slides/Deep_American_Option_Parfun.ipynb b/slides/Deep_American_Option_Parfun.ipynb new file mode 100644 index 000000000..b62564bf5 --- /dev/null +++ b/slides/Deep_American_Option_Parfun.ipynb @@ -0,0 +1,237 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "47b881bd-482c-4e44-9378-01ee80f8c0cf", + "metadata": {}, + "source": [ + "# Heavy American Option Pricing with Longstaff-Schwartz (LSM) and parfun\n", + "\n", + "This notebook replaces the simple binomial tree with a **Longstaff-Schwartz Monte Carlo (LSM)** model. LSM introduces:\n", + "- A full **stochastic process simulation** (GBM paths)\n", + "- **Cross-sectional regressions** at every exercise date\n", + "- Much higher computational cost\n", + "\n", + "This is representative of **production American option engines** used for long-dated and complex payoffs.\n" + ] + }, + { + "cell_type": "markdown", + "id": "c73d2b8d-e3c1-4346-8c82-4a3333a32d95", + "metadata": {}, + "source": [ + "# Imports and Environment\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8e4e027c-3e04-4d88-88cb-a7af60f9e062", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import numpy as np\n", + "import time\n", + "\n", + "sys.stderr = open(os.devnull, \"w\")" + ] + }, + { + "cell_type": "markdown", + "id": "98538b4e-24c7-4bdd-85ef-602a78fc0a7b", + "metadata": {}, + "source": [ + "# Longstaff-Schwartz Monte Carlo (American Put)\n", + "\n", + "This implementation follows the classical Longstaff-Schwartz (2001) algorithm.\n", + "\n", + "Computational cost:\n", + "- Path simulation: O(paths x steps)\n", + "- Regression per step: O(paths x $basis^2$ x steps)\n", + "This quickly grows into **multi-minute workloads**."
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7303eb76-db11-44b8-ba21-cffde2732fa7", + "metadata": {}, + "outputs": [], + "source": [ + "def american_put_lsm(S, K, r, sigma, T, steps, paths, degree=6, seed=None):\n", + " if seed is not None:\n", + " np.random.seed(seed)\n", + "\n", + " dt = T / steps\n", + " disc = np.exp(-r * dt)\n", + "\n", + " Z = np.random.normal(size=(paths, steps))\n", + " S_paths = np.empty((paths, steps + 1))\n", + " S_paths[:, 0] = S\n", + "\n", + " for t in range(steps):\n", + " S_paths[:, t + 1] = S_paths[:, t] * np.exp(\n", + " (r - 0.5 * sigma **2) * dt + sigma * np.sqrt(dt) * Z[:, t]\n", + " )\n", + "\n", + " # Payoff at maturity\n", + " cashflows = np.maximum(K - S_paths[:, -1], 0.0)\n", + "\n", + " # Backward induction\n", + " for t in range(steps - 1, 0, -1):\n", + " itm = S_paths[:, t] < K\n", + " X = S_paths[itm, t]\n", + " Y = cashflows[itm] * disc\n", + "\n", + " if len(X) > degree:\n", + " coeffs = np.polyfit(X, Y, degree)\n", + " continuation = np.polyval(coeffs, X)\n", + " else:\n", + " continuation = np.zeros_like(X)\n", + "\n", + " exercise = K - X\n", + " exercise_now = exercise > continuation\n", + "\n", + " cashflows[itm] = np.where(\n", + " exercise_now,\n", + " exercise,\n", + " Y\n", + " )\n", + "\n", + " return cashflows.mean() * disc\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "161a01a5-18ac-4833-8328-b3a4772c4297", + "metadata": {}, + "source": [ + "# Heavy Sequential Workload\n", + "\n", + "We price the same option under many volatility scenarios and repeat this across a large batch. This mimics real-world **scenario analysis / stress testing** workloads."
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "33354277-c135-482e-ad05-d00cff5ec6a4", + "metadata": {}, + "outputs": [], + "source": [ + "# Heavy parameters\n", + "PATHS = 80_000 # Monte Carlo paths (increase to push single run runtime)\n", + "STEPS = 252 # daily exercise dates\n", + "DEGREE = 6 # regression basis complexity; increases single run time\n", + "BATCH_SIZE = 4 # portfolio size; increases the total batch workload\n", + "SCENARIOS = np.linspace(0.15, 0.35, 20)\n", + "\n", + "base_param = (100.0, 100.0, 0.05, 0.2, 1.0, STEPS, PATHS)\n", + "BATCH = [base_param] * BATCH_SIZE\n", + "\n", + "\n", + "def price_with_scenarios(p, scenarios):\n", + " S, K, r, _, T, St, N = p\n", + " acc = [american_put_lsm(S, K, r, vol, T, St, N, DEGREE) for vol in scenarios]\n", + " return sum(acc)\n", + "\n", + "\n", + "def batch_price_with_scenarios(tasks, scenarios):\n", + " return [price_with_scenarios(p, scenarios)/len(scenarios) for p in tasks]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73b809f9-2aba-40eb-90d7-1366b42a2d8b", + "metadata": {}, + "outputs": [], + "source": [ + "start = time.time()\n", + "results_seq = batch_price_with_scenarios(BATCH, SCENARIOS)\n", + "seq_time = time.time() - start\n", + "\n", + "print(f\"Sequential runtime: {seq_time / 60:.2f} minutes\")" + ] + }, + { + "cell_type": "markdown", + "id": "e1b10f72-527d-4154-a608-93bb893bb84e", + "metadata": {}, + "source": [ + "# Parallel Execution with parfun\n", + "\n", + "We now parallelize the outer batch loop using **parfun**. Only a decorator and a function call change are required."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb1d234c-d74f-46d3-81a4-b0a280af72c2", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Requires: pip install opengris-parfun\n", + "import parfun as pf\n", + "from typing import List, Tuple\n", + "\n", + "@pf.parfun(split=pf.per_argument(tasks=pf.py_list.by_chunk), combine_with=pf.py_list.concat, fixed_partition_size=1)\n", + "def batch_price_with_scenarios_w_parfun(tasks, scenarios):\n", + " return [price_with_scenarios(p, scenarios)/len(scenarios) for p in tasks]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "76c24a6b-e3ab-4aa5-8b36-4a1ef0f7bbc1", + "metadata": {}, + "outputs": [], + "source": [ + "start = time.time()\n", + "with pf.set_parallel_backend_context(\"scaler_local\", n_workers=4):\n", + " results_par = batch_price_with_scenarios_w_parfun(BATCH, SCENARIOS)\n", + "par_time = time.time() - start\n", + "\n", + "print(f\"Parallel runtime: {par_time / 60:.2f} minutes\")\n", + "print(f\"Speedup: {seq_time / par_time:.2f}x\")" + ] + }, + { + "cell_type": "markdown", + "id": "33443fa8-af01-49d3-8d8e-680c9e933c6d", + "metadata": {}, + "source": [ + "# Interpretation\n", + "- The sequential run should take **~10 minutes** on a single core (machine-dependent).\n", + "- The parfun version distributes work across available cores automatically.\n", + "- Speedup should approach the number of physical cores for this embarrassingly parallel workload.\n", + "\n", + "This pattern closely matches real-world **risk, stress testing, and model validation** workloads."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.14.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/slides/Parallel_Calibration_Parfun.ipynb b/slides/Parallel_Calibration_Parfun.ipynb new file mode 100644 index 000000000..df6a48f88 --- /dev/null +++ b/slides/Parallel_Calibration_Parfun.ipynb @@ -0,0 +1,817 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "aaeef758-90c1-47f6-8d8f-7b682bdb8af1", + "metadata": {}, + "source": [ + "# Parallel Calibration of Stochastic Models with ParFun + QuantLib\n", + " \n", + "This notebook demonstrates how **ParFun** can parallelize QuantLib calibration\n", + "tasks with *minimal code changes*. \n", + " \n", + "We calibrate:\n", + " - **Heston model** using multi-start optimization \n", + " - **SABR model** per-expiry calibration \n", + " - **Hull–White model** per-curve calibration \n", + " \n", + "ParFun: https://github.com/finos/opengris-parfun\n", + "QuantLib: https://www.quantlib.org/" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "fc547464-6f7a-4207-a4de-ea9c98a478ca", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "import numpy as np\n", + "import QuantLib as ql\n", + "\n", + "import parfun as pf" + ] + }, + { + "cell_type": "markdown", + "id": "6a073454-4c64-435f-9a8c-a7bdf365d712", + "metadata": {}, + "source": [ + "------------------------------------------------------------\n", + "1. HESTON MULTI‑START CALIBRATION\n", + "------------------------------------------------------------\n", + "\n", + "Calibrate a Heston model multiple times with different initial guesses.\n", + "ParFun makes this trivially parallel." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c9423309-05c7-4d32-8d0b-c698e09182f5", + "metadata": {}, + "outputs": [], + "source": [ + "def calibrate_heston_once(initial_guess):\n", + " \"\"\"Calibrate a Heston model with a given initial guess.\n", + " Uses a dense strike x maturity grid to create a realistic workload.\n", + " Returns calibrated parameters and error.\"\"\"\n", + " spot = 100\n", + " risk_free = 0.01\n", + " dividend = 0.00\n", + "\n", + " rf_handle = ql.YieldTermStructureHandle(ql.FlatForward(0, ql.NullCalendar(), risk_free, ql.Actual365Fixed()))\n", + " div_handle = ql.YieldTermStructureHandle(ql.FlatForward(0, ql.NullCalendar(), dividend, ql.Actual365Fixed()))\n", + "\n", + " process = ql.HestonProcess(\n", + " rf_handle, div_handle,\n", + " ql.QuoteHandle(ql.SimpleQuote(spot)),\n", + " initial_guess[0], initial_guess[1], initial_guess[2],\n", + " initial_guess[3], initial_guess[4],\n", + " )\n", + "\n", + " model = ql.HestonModel(process)\n", + " engine = ql.AnalyticHestonEngine(model)\n", + "\n", + " strikes = [70, 80, 85, 90, 95, 100, 105, 110, 115, 120, 130]\n", + " maturities = [ql.Period(m, ql.Months) for m in [3, 6, 12, 18, 24]]\n", + " base_vols = {\n", + " 70: 0.30, 80: 0.26, 85: 0.24, 90: 0.22, 95: 0.21,\n", + " 100: 0.20, 105: 0.21, 110: 0.22, 115: 0.24, 120: 0.26, 130: 0.30,\n", + " }\n", + "\n", + " helpers = []\n", + " for mat in maturities:\n", + " for k in strikes:\n", + " h = ql.HestonModelHelper(\n", + " mat, ql.NullCalendar(), spot, k,\n", + " ql.QuoteHandle(ql.SimpleQuote(base_vols[k])),\n", + " rf_handle, div_handle,\n", + " )\n", + " h.setPricingEngine(engine)\n", + " helpers.append(h)\n", + "\n", + " model.calibrate(helpers, ql.LevenbergMarquardt(), ql.EndCriteria(5000, 500, 1e-8, 1e-8, 1e-8))\n", + " error = sum(h.calibrationError() for h in helpers)\n", + "\n", + " return list(model.params()), error\n", + "\n", + "\n", + "@pf.parfun(\n", + " 
split=pf.per_argument(initial_guesses=pf.py_list.by_chunk),\n", + " combine_with=pf.py_list.concat,\n", + " fixed_partition_size=1,\n", + ")\n", + "def calibrate_heston_parallel(initial_guesses):\n", + " return [calibrate_heston_once(g) for g in initial_guesses]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "af5acbfc-8fcd-480e-b76d-a858a5634011", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sequential: 143.42s\n", + "[INFO]2026-03-27 07:33:34+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:34+0800: ObjectStorageServer: start and listen to tcp://127.0.0.1:44807\n", + "[INFO]2026-03-27 07:33:34+0800: ObjectStorageServer: started\n", + "[INFO]2026-03-27 07:33:34+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:34+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: scheduler_address = tcp://127.0.0.1:51207\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: object_storage_address = tcp://127.0.0.1:44807\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: monitor_address = None\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: protected = True\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: max_number_of_tasks_waiting = -1\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: client_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: worker_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: object_retention_seconds = 60\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: load_balance_seconds = 1\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: load_balance_trigger_times = 2\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: event_loop = builtin\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: io_threads = 1\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: logging_config = LoggingConfig(paths=('/dev/stdout',), 
config_file=None, level='INFO')\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: policy = PolicyConfig(policy_engine_type='simple', policy_content='allocate=even_load; scaling=no')\n", + "[INFO]2026-03-27 07:33:34+0800: Scheduler: listen to scheduler address tcp://127.0.0.1:51207\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: updated `object_storage_address` from `tcp://127.0.0.1:44807` to `tcp://127.0.0.1:44807`\n", + "[INFO]2026-03-27 07:33:34+0800: Scheduler: connect to object storage server tcp://127.0.0.1:44807\n", + "[INFO]2026-03-27 07:33:34+0800: ConfigController: updated `monitor_address` from `None` to `tcp://127.0.0.1:51209`\n", + "[INFO]2026-03-27 07:33:34+0800: Scheduler: listen to scheduler monitor address tcp://127.0.0.1:51209\n", + "[INFO]2026-03-27 07:33:34+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:33:34+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaGraphTaskController: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaBalanceController: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaClientController: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaObjectController: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaWorkerController: started\n", + "[INFO]2026-03-27 07:33:34+0800: WorkerManagerController: started\n", + "[INFO]2026-03-27 07:33:34+0800: VanillaInformationController: started\n", + "[INFO]2026-03-27 07:33:34+0800: ClientID(Client|0d65b71ce5124bfdb510bdec3da54718) connected\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:33:35+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:33:35+0800: WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c): start Processor[621870]\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncConnector: 
started\n", + "[INFO]2026-03-27 07:33:35+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: WorkerID(Worker|FIX|e290421c40284e609c923a06014155e7|4afcf8920b3c464985225ce7784b4246): start Processor[621871]\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: worker WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c) connected\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: worker WorkerID(Worker|FIX|e290421c40284e609c923a06014155e7|4afcf8920b3c464985225ce7784b4246) connected\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:33:35+0800: WorkerID(Worker|FIX|1ac29911680a46e990aad361b3d38e59|c4643b9abe494bfea9da7e8f31284ddb): start Processor[621874]\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: worker 
WorkerID(Worker|FIX|1ac29911680a46e990aad361b3d38e59|c4643b9abe494bfea9da7e8f31284ddb) connected\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:33:35+0800: WorkerID(Worker|FIX|673412f7f1644379978267ec5648339a|8423e45aa6d64e909128ae68dbb61dc0): start Processor[621877]\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:33:35+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:33:35+0800: worker WorkerID(Worker|FIX|673412f7f1644379978267ec5648339a|8423e45aa6d64e909128ae68dbb61dc0) connected\n", + "[INFO]2026-03-27 07:33:35+0800: balancing task: {WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c): 3}\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: Processor[621870] connecting to object storage at tcp://127.0.0.1:44807...\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: Processor[621871] connecting to object storage at tcp://127.0.0.1:44807...\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:33:35+0800: Processor[621874] connecting to object storage at tcp://127.0.0.1:44807...\n", + "[INFO]2026-03-27 07:33:35+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 
07:33:35+0800: Processor[621877] connecting to object storage at tcp://127.0.0.1:44807...\n", + "[INFO]2026-03-27 07:33:37+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:33:38+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:33:38+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:33:38+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:34:14+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:34:14+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:34:15+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:34:16+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:34:45+0800: ClientID(Client|0d65b71ce5124bfdb510bdec3da54718) disconnected\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: 
VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:34:45+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c) disconnected\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|e290421c40284e609c923a06014155e7|4afcf8920b3c464985225ce7784b4246) disconnected\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|673412f7f1644379978267ec5648339a|8423e45aa6d64e909128ae68dbb61dc0) disconnected\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|1ac29911680a46e990aad361b3d38e59|c4643b9abe494bfea9da7e8f31284ddb) disconnected\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c): stop Processor[621870], reason: quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|3e146e1c2e83401db410419c1862d59b|ed8ff763f9ab40a591762cd7788dff8c): quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|673412f7f1644379978267ec5648339a|8423e45aa6d64e909128ae68dbb61dc0): stop Processor[621877], reason: quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|673412f7f1644379978267ec5648339a|8423e45aa6d64e909128ae68dbb61dc0): quit\n", + "[INFO]2026-03-27 07:34:45+0800: 
WorkerID(Worker|FIX|e290421c40284e609c923a06014155e7|4afcf8920b3c464985225ce7784b4246): stop Processor[621871], reason: quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|e290421c40284e609c923a06014155e7|4afcf8920b3c464985225ce7784b4246): quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|1ac29911680a46e990aad361b3d38e59|c4643b9abe494bfea9da7e8f31284ddb): stop Processor[621874], reason: quit\n", + "[INFO]2026-03-27 07:34:45+0800: WorkerID(Worker|FIX|1ac29911680a46e990aad361b3d38e59|c4643b9abe494bfea9da7e8f31284ddb): quit\n", + "[INFO]2026-03-27 07:34:46+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:34:46+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaGraphTaskController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaBalanceController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaClientController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaObjectController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaWorkerController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: WorkerManagerController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: VanillaInformationController: exited\n", + "[INFO]2026-03-27 07:34:46+0800: ObjectStorageServer: stopped by user\n", + "Parallel: 72.27s\n", + "Speedup: 1.98x\n" + ] + } + ], + "source": [ + "initial_guesses = [\n", + " [0.1, 1.0, 0.05, 0.3, -0.5],\n", + " [0.2, 0.5, 0.04, 0.4, -0.3],\n", + " [0.05, 2.0, 0.07, 0.2, -0.7],\n", + " [0.15, 1.5, 0.06, 0.25, -0.4],\n", + " [0.08, 0.8, 0.03, 0.35, -0.6],\n", + " [0.12, 1.2, 0.08, 0.15, -0.2],\n", + " [0.18, 0.3, 0.05, 0.5, -0.8],\n", + " [0.06, 1.8, 0.04, 0.28, -0.45],\n", + "]\n", + "\n", + "start = time.time()\n", + "results_heston_seq = [calibrate_heston_once(g) for g in initial_guesses]\n", + "heston_seq_time = time.time() - start\n", + "print(f\"Sequential: {heston_seq_time:.2f}s\")\n", + "\n", + "start = time.time()\n", + "with pf.set_parallel_backend_context(\"scaler_local\", 
n_workers=4):\n", + " results_heston = calibrate_heston_parallel(initial_guesses)\n", + "heston_par_time = time.time() - start\n", + "print(f\"Parallel: {heston_par_time:.2f}s\")\n", + "print(f\"Speedup: {heston_seq_time / heston_par_time:.2f}x\")" + ] + }, + { + "cell_type": "markdown", + "id": "96700640-08bc-4454-ae2c-274f09da35d1", + "metadata": {}, + "source": [ + "------------------------------------------------------------\n", + "2. SABR PER‑EXPIRY CALIBRATION\n", + "------------------------------------------------------------\n", + "\n", + "Calibrate SABR parameters independently for each expiry of a volatility surface.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "74a7ef8a-1b58-44e6-84ef-bdf7de98fb82", + "metadata": {}, + "outputs": [], + "source": [ + "def calibrate_sabr(expiry_data):\n", + " \"\"\"Calibrate SABR parameters for a single expiry using random search.\n", + " expiry_data = (expiry_time, forward, strikes, market_vols)\n", + " Returns (expiry, calibrated_params_dict, fit_error).\"\"\"\n", + " expiry, forward, strikes, market_vols = expiry_data\n", + " beta = 0.5 # fixed\n", + " market_vols_arr = np.array(market_vols)\n", + "\n", + " best_error = np.inf\n", + " best_params = None\n", + " rng = np.random.RandomState(int(expiry * 1000))\n", + "\n", + " for _ in range(100_000):\n", + " alpha = rng.uniform(0.01, 2.0)\n", + " nu = rng.uniform(0.01, 2.0)\n", + " rho = rng.uniform(-0.999, 0.999)\n", + " try:\n", + " vols = np.array([\n", + " ql.sabrVolatility(float(k), float(forward), float(expiry), alpha, beta, nu, rho)\n", + " for k in strikes\n", + " ])\n", + " error = ((vols - market_vols_arr) ** 2).sum()\n", + " if error < best_error:\n", + " best_error = error\n", + " best_params = (alpha, nu, rho)\n", + " except RuntimeError:\n", + " continue\n", + "\n", + " alpha, nu, rho = best_params\n", + " return expiry, {\"alpha\": alpha, \"beta\": beta, \"nu\": nu, \"rho\": rho}, best_error\n", + "\n", + "\n", + "@pf.parfun(\n", + 
" split=pf.per_argument(expiry_data_list=pf.py_list.by_chunk),\n", + " combine_with=pf.py_list.concat,\n", + " fixed_partition_size=1,\n", + ")\n", + "def calibrate_sabr_parallel(expiry_data_list):\n", + " return [calibrate_sabr(d) for d in expiry_data_list]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "c44dc2f0-3260-47a5-b191-836f6f10b8ea", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sequential: 11.67s\n", + "[INFO]2026-03-27 07:34:58+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:58+0800: ObjectStorageServer: start and listen to tcp://127.0.0.1:60397\n", + "[INFO]2026-03-27 07:34:58+0800: ObjectStorageServer: started\n", + "[INFO]2026-03-27 07:34:58+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:58+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: scheduler_address = tcp://127.0.0.1:43897\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: object_storage_address = tcp://127.0.0.1:60397\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: monitor_address = None\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: protected = True\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: max_number_of_tasks_waiting = -1\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: client_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: worker_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: object_retention_seconds = 60\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: load_balance_seconds = 1\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: load_balance_trigger_times = 2\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: event_loop = builtin\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: io_threads = 1\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: logging_config = LoggingConfig(paths=('/dev/stdout',), 
config_file=None, level='INFO')\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: policy = PolicyConfig(policy_engine_type='simple', policy_content='allocate=even_load; scaling=no')\n", + "[INFO]2026-03-27 07:34:58+0800: Scheduler: listen to scheduler address tcp://127.0.0.1:43897\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: updated `object_storage_address` from `tcp://127.0.0.1:60397` to `tcp://127.0.0.1:60397`\n", + "[INFO]2026-03-27 07:34:58+0800: Scheduler: connect to object storage server tcp://127.0.0.1:60397\n", + "[INFO]2026-03-27 07:34:58+0800: ConfigController: updated `monitor_address` from `None` to `tcp://127.0.0.1:43899`\n", + "[INFO]2026-03-27 07:34:58+0800: Scheduler: listen to scheduler monitor address tcp://127.0.0.1:43899\n", + "[INFO]2026-03-27 07:34:58+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:34:58+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaGraphTaskController: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaBalanceController: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaClientController: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaObjectController: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaWorkerController: started\n", + "[INFO]2026-03-27 07:34:58+0800: WorkerManagerController: started\n", + "[INFO]2026-03-27 07:34:58+0800: VanillaInformationController: started\n", + "[INFO]2026-03-27 07:34:58+0800: ClientID(Client|a8a2eca8c27c43ca88b830be2e8811f7) connected\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:34:59+0800: WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa): start Processor[621961]\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: 
ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: worker WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa) connected\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:34:59+0800: WorkerID(Worker|FIX|00f8b6e2290e4cf2a562576f9f2a71f0|a3e929522980415893f8a41e411c02fc): start Processor[621964]\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: worker WorkerID(Worker|FIX|00f8b6e2290e4cf2a562576f9f2a71f0|a3e929522980415893f8a41e411c02fc) connected\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:34:59+0800: WorkerID(Worker|FIX|082985ab05b141b58d65e8b5a2b86b34|668bdc4e4e074a6ca7c73440751dc5ef): start Processor[621967]\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: use event loop: builtin\n", + 
"[INFO]2026-03-27 07:34:59+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: worker WorkerID(Worker|FIX|082985ab05b141b58d65e8b5a2b86b34|668bdc4e4e074a6ca7c73440751dc5ef) connected\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: WorkerID(Worker|FIX|0a0ace9e5ca1428eb0af0f595be309f7|4def1c9b5a004032b20fc76d25e68b2a): start Processor[621970]\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:34:59+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:34:59+0800: worker WorkerID(Worker|FIX|0a0ace9e5ca1428eb0af0f595be309f7|4def1c9b5a004032b20fc76d25e68b2a) connected\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: Processor[621961] connecting to object storage at tcp://127.0.0.1:60397...\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: Processor[621964] connecting to object storage at tcp://127.0.0.1:60397...\n", + "[INFO]2026-03-27 07:34:59+0800: balancing task: {WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa): 3}\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: Processor[621967] connecting to object storage at tcp://127.0.0.1:60397...\n", + "[INFO]2026-03-27 07:34:59+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:34:59+0800: 
Processor[621970] connecting to object storage at tcp://127.0.0.1:60397...\n", + "[INFO]2026-03-27 07:35:01+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:02+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:02+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:02+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:35:08+0800: ClientID(Client|a8a2eca8c27c43ca88b830be2e8811f7) disconnected\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaHeartbeatManager: 
exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:35:08+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:35:08+0800: WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa) disconnected\n", + "[INFO]2026-03-27 07:35:08+0800: WorkerID(Worker|FIX|00f8b6e2290e4cf2a562576f9f2a71f0|a3e929522980415893f8a41e411c02fc) disconnected\n", + "[INFO]2026-03-27 07:35:08+0800: WorkerID(Worker|FIX|0a0ace9e5ca1428eb0af0f595be309f7|4def1c9b5a004032b20fc76d25e68b2a) disconnected\n", + "[INFO]2026-03-27 07:35:08+0800: WorkerID(Worker|FIX|082985ab05b141b58d65e8b5a2b86b34|668bdc4e4e074a6ca7c73440751dc5ef) disconnected\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|00f8b6e2290e4cf2a562576f9f2a71f0|a3e929522980415893f8a41e411c02fc): stop Processor[621964], reason: quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|00f8b6e2290e4cf2a562576f9f2a71f0|a3e929522980415893f8a41e411c02fc): quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|0a0ace9e5ca1428eb0af0f595be309f7|4def1c9b5a004032b20fc76d25e68b2a): stop Processor[621970], reason: quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|082985ab05b141b58d65e8b5a2b86b34|668bdc4e4e074a6ca7c73440751dc5ef): stop Processor[621967], reason: quit\n", + "[INFO]2026-03-27 07:35:09+0800: 
WorkerID(Worker|FIX|0a0ace9e5ca1428eb0af0f595be309f7|4def1c9b5a004032b20fc76d25e68b2a): quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|082985ab05b141b58d65e8b5a2b86b34|668bdc4e4e074a6ca7c73440751dc5ef): quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa): stop Processor[621961], reason: quit\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerID(Worker|FIX|ea9dba67d4e6494cb165f3b98bdb72c9|9717d7f4c9fd432994b7988373e521aa): quit\n", + "[INFO]2026-03-27 07:35:09+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:35:09+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaGraphTaskController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaBalanceController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaClientController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaObjectController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaWorkerController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: WorkerManagerController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: VanillaInformationController: exited\n", + "[INFO]2026-03-27 07:35:09+0800: ObjectStorageServer: stopped by user\n", + "Parallel: 11.96s\n", + "Speedup: 0.98x\n" + ] + } + ], + "source": [ + "strikes = [80.0, 90.0, 100.0, 110.0, 120.0]\n", + "expiries = [0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0]\n", + "sabr_data = [\n", + " (T, 100.0, strikes, [0.25 + 0.02 * (i + 1), 0.22 + 0.01 * i, 0.20, 0.21 + 0.01 * i, 0.23 + 0.02 * i])\n", + " for i, T in enumerate(expiries)\n", + "]\n", + "\n", + "start = time.time()\n", + "results_sabr_seq = [calibrate_sabr(d) for d in sabr_data]\n", + "sabr_seq_time = time.time() - start\n", + "print(f\"Sequential: {sabr_seq_time:.2f}s\")\n", + "\n", + "start = time.time()\n", + "with pf.set_parallel_backend_context(\"scaler_local\", n_workers=4):\n", + " results_sabr = calibrate_sabr_parallel(sabr_data)\n", + "sabr_par_time = 
time.time() - start\n", + "print(f\"Parallel: {sabr_par_time:.2f}s\")\n", + "print(f\"Speedup: {sabr_seq_time / sabr_par_time:.2f}x\")" + ] + }, + { + "cell_type": "markdown", + "id": "58da061e-af3f-42ee-abf2-8e26b378e9fd", + "metadata": {}, + "source": [ + "------------------------------------------------------------\n", + "3. HULL–WHITE PER‑CURVE CALIBRATION\n", + "------------------------------------------------------------\n", + "\n", + "Calibrate a Hull–White model for multiple yield curves independently.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "07c1404a-fde3-4011-b27d-71993c77b998", + "metadata": {}, + "outputs": [], + "source": [ + "def calibrate_hw(curve_data):\n", + " \"\"\"Calibrate a Hull-White model for a yield curve.\n", + " curve_data = (curve_id, base_rate)\n", + " Returns (curve_id, calibrated_params).\"\"\"\n", + " curve_id, base_rate = curve_data\n", + " today = ql.Date(1, 1, 2025)\n", + " ql.Settings.instance().evaluationDate = today\n", + "\n", + " tenors_months = [0, 6, 12, 24, 36, 60, 84, 120, 180, 240, 360, 480]\n", + " dates = [today + ql.Period(m, ql.Months) for m in tenors_months]\n", + " rates = [base_rate + i * 0.002 for i in range(len(tenors_months))]\n", + "\n", + " curve = ql.ZeroCurve(dates, rates, ql.Actual365Fixed())\n", + " curve_handle = ql.YieldTermStructureHandle(curve)\n", + " index = ql.Euribor6M(curve_handle)\n", + "\n", + " model = ql.HullWhite(curve_handle)\n", + " engine = ql.TreeSwaptionEngine(model, 150)\n", + "\n", + " helpers = []\n", + " for expiry_years in [1, 2, 3, 5, 7, 10, 15, 20]:\n", + " for swap_tenor in [2, 5, 10, 15]:\n", + " h = ql.SwaptionHelper(\n", + " ql.Period(expiry_years, ql.Years),\n", + " ql.Period(swap_tenor, ql.Years),\n", + " ql.QuoteHandle(ql.SimpleQuote(0.0055)),\n", + " index, index.tenor(), index.dayCounter(), index.dayCounter(),\n", + " curve_handle,\n", + " )\n", + " h.setPricingEngine(engine)\n", + " helpers.append(h)\n", + "\n", + " model.calibrate(\n", + " 
helpers, ql.LevenbergMarquardt(),\n", + " ql.EndCriteria(5000, 500, 1e-8, 1e-8, 1e-8),\n", + " )\n", + " return curve_id, list(model.params())\n", + "\n", + "\n", + "@pf.parfun(\n", + " split=pf.per_argument(curve_data_list=pf.py_list.by_chunk),\n", + " combine_with=pf.py_list.concat,\n", + " fixed_partition_size=1,\n", + ")\n", + "def calibrate_hw_parallel(curve_data_list):\n", + " return [calibrate_hw(d) for d in curve_data_list]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "2691c59a-9c54-46e1-a186-6b4ed888604d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sequential: 171.53s\n", + "[INFO]2026-03-27 07:38:01+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:01+0800: ObjectStorageServer: start and listen to tcp://127.0.0.1:41361\n", + "[INFO]2026-03-27 07:38:01+0800: ObjectStorageServer: started\n", + "[INFO]2026-03-27 07:38:02+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:02+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: scheduler_address = tcp://127.0.0.1:58883\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: object_storage_address = tcp://127.0.0.1:41361\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: monitor_address = None\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: protected = True\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: max_number_of_tasks_waiting = -1\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: client_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: worker_timeout_seconds = 60\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: object_retention_seconds = 60\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: load_balance_seconds = 1\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: load_balance_trigger_times = 2\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: event_loop = builtin\n", + 
"[INFO]2026-03-27 07:38:02+0800: ConfigController: io_threads = 1\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: logging_config = LoggingConfig(paths=('/dev/stdout',), config_file=None, level='INFO')\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: policy = PolicyConfig(policy_engine_type='simple', policy_content='allocate=even_load; scaling=no')\n", + "[INFO]2026-03-27 07:38:02+0800: Scheduler: listen to scheduler address tcp://127.0.0.1:58883\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: updated `object_storage_address` from `tcp://127.0.0.1:41361` to `tcp://127.0.0.1:41361`\n", + "[INFO]2026-03-27 07:38:02+0800: Scheduler: connect to object storage server tcp://127.0.0.1:41361\n", + "[INFO]2026-03-27 07:38:02+0800: ConfigController: updated `monitor_address` from `None` to `tcp://127.0.0.1:58885`\n", + "[INFO]2026-03-27 07:38:02+0800: Scheduler: listen to scheduler monitor address tcp://127.0.0.1:58885\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:38:02+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaGraphTaskController: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaBalanceController: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaClientController: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaObjectController: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaWorkerController: started\n", + "[INFO]2026-03-27 07:38:02+0800: WorkerManagerController: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaInformationController: started\n", + "[INFO]2026-03-27 07:38:02+0800: ClientID(Client|6d921406f67b419390861fc3ae1c4784) connected\n", + "[INFO]2026-03-27 07:38:02+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:02+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:38:02+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:02+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:38:02+0800: 
WorkerID(Worker|FIX|0458313bb4f843a395d348eb35b5bbf8|2b3d4cbd94f24d148ec18bbf3de31c34): start Processor[622086]\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15): start Processor[622087]\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: worker WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15) connected\n", + "[INFO]2026-03-27 07:38:02+0800: worker WorkerID(Worker|FIX|0458313bb4f843a395d348eb35b5bbf8|2b3d4cbd94f24d148ec18bbf3de31c34) connected\n", + "[INFO]2026-03-27 07:38:02+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:02+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:38:02+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:02+0800: use event loop: builtin\n", + "[INFO]2026-03-27 07:38:02+0800: WorkerID(Worker|FIX|5df701cdddba43b3a09dd93abdbd3e17|b7fab4702f8f4c4bbdcd9853e194b0f6): start Processor[622092]\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncConnector: 
started\n", + "[INFO]2026-03-27 07:38:02+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: worker WorkerID(Worker|FIX|5df701cdddba43b3a09dd93abdbd3e17|b7fab4702f8f4c4bbdcd9853e194b0f6) connected\n", + "[INFO]2026-03-27 07:38:02+0800: WorkerID(Worker|FIX|52317e2b092849e58667d2041d15188c|a1bd45d8988e47f2a2bd0f0aa26661dd): start Processor[622093]\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 07:38:02+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: worker WorkerID(Worker|FIX|52317e2b092849e58667d2041d15188c|a1bd45d8988e47f2a2bd0f0aa26661dd) connected\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 07:38:02+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 07:38:03+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:03+0800: Processor[622086] connecting to object storage at tcp://127.0.0.1:41361...\n", + "[INFO]2026-03-27 07:38:03+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:03+0800: Processor[622087] connecting to object storage at tcp://127.0.0.1:41361...\n", + "[INFO]2026-03-27 07:38:03+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:03+0800: Processor[622092] connecting to object storage at tcp://127.0.0.1:41361...\n", + "[INFO]2026-03-27 07:38:03+0800: balancing task: 
{WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15): 3}\n", + "[INFO]2026-03-27 07:38:03+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 07:38:03+0800: Processor[622093] connecting to object storage at tcp://127.0.0.1:41361...\n", + "[INFO]2026-03-27 07:38:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:05+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:36+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:37+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:43+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:38:48+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 07:39:25+0800: ClientID(Client|6d921406f67b419390861fc3ae1c4784) disconnected\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:39:25+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaHeartbeatManager: 
exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 07:39:25+0800: WorkerID(Worker|FIX|52317e2b092849e58667d2041d15188c|a1bd45d8988e47f2a2bd0f0aa26661dd) disconnected\n", + "[INFO]2026-03-27 07:39:25+0800: WorkerID(Worker|FIX|5df701cdddba43b3a09dd93abdbd3e17|b7fab4702f8f4c4bbdcd9853e194b0f6) disconnected\n", + "[INFO]2026-03-27 07:39:25+0800: WorkerID(Worker|FIX|0458313bb4f843a395d348eb35b5bbf8|2b3d4cbd94f24d148ec18bbf3de31c34) disconnected\n", + "[INFO]2026-03-27 07:39:25+0800: WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15) disconnected\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|0458313bb4f843a395d348eb35b5bbf8|2b3d4cbd94f24d148ec18bbf3de31c34): stop Processor[622086], reason: quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|0458313bb4f843a395d348eb35b5bbf8|2b3d4cbd94f24d148ec18bbf3de31c34): quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|5df701cdddba43b3a09dd93abdbd3e17|b7fab4702f8f4c4bbdcd9853e194b0f6): stop Processor[622092], reason: quit\n", + 
"[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|5df701cdddba43b3a09dd93abdbd3e17|b7fab4702f8f4c4bbdcd9853e194b0f6): quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|52317e2b092849e58667d2041d15188c|a1bd45d8988e47f2a2bd0f0aa26661dd): stop Processor[622093], reason: quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|52317e2b092849e58667d2041d15188c|a1bd45d8988e47f2a2bd0f0aa26661dd): quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15): stop Processor[622087], reason: quit\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerID(Worker|FIX|cb6ec69444514a829e35fd1dd4b942f1|fc54532e06b34089b1f3a71af823ca15): quit\n", + "[INFO]2026-03-27 07:39:26+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 07:39:26+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaGraphTaskController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaBalanceController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaClientController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaObjectController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaWorkerController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: WorkerManagerController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: VanillaInformationController: exited\n", + "[INFO]2026-03-27 07:39:26+0800: ObjectStorageServer: stopped by user\n", + "Parallel: 85.39s\n", + "Speedup: 2.01x\n" + ] + } + ], + "source": [ + "curves = [\n", + " (\"curve_A\", 0.010),\n", + " (\"curve_B\", 0.015),\n", + " (\"curve_C\", 0.005),\n", + " (\"curve_D\", 0.020),\n", + " (\"curve_E\", 0.008),\n", + " (\"curve_F\", 0.012),\n", + " (\"curve_G\", 0.018),\n", + " (\"curve_H\", 0.003),\n", + "]\n", + "\n", + "start = time.time()\n", + "results_hw_seq = [calibrate_hw(d) for d in curves]\n", + "hw_seq_time = time.time() - start\n", + "print(f\"Sequential: {hw_seq_time:.2f}s\")\n", + "\n", + "start = 
time.time()\n", + "with pf.set_parallel_backend_context(\"scaler_local\", n_workers=4):\n", + " results_hw = calibrate_hw_parallel(curves)\n", + "hw_par_time = time.time() - start\n", + "print(f\"Parallel: {hw_par_time:.2f}s\")\n", + "print(f\"Speedup: {hw_seq_time / hw_par_time:.2f}x\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.14.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/slides/parfun_colebrook_demo.ipynb b/slides/parfun_colebrook_demo.ipynb new file mode 100644 index 000000000..0351c15cf --- /dev/null +++ b/slides/parfun_colebrook_demo.ipynb @@ -0,0 +1,326 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6ccfe13a", + "metadata": {}, + "source": [ + "# Parallel Colebrook-White Friction Factor Solver with ParFun\n", + "\n", + "The **Colebrook-White equation** is the industry-standard formula for computing the **Darcy-Weisbach friction factor** in turbulent pipe flow:\n", + "\n", + "$$\n", + "\\frac{1}{\\sqrt{f}} = -2 \\log_{10}\\!\\left(\\frac{\\varepsilon/D}{3.7} + \\frac{2.51}{\\text{Re}\\sqrt{f}}\\right)\n", + "$$\n", + "\n", + "Because $f$ appears on both sides, there is no closed-form solution -- it must be solved numerically.\n", + "We solve it for **500,000 pipe segments** (a large municipal water network) using Brent's method with **20 initial guesses** each, giving **10,000,000 total solves**.\n", + "\n", + "ParFun parallelises this with minimal code changes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "5f295151", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "from typing import List, Optional\n", + "\n", + "import numpy as np\n", + "from scipy.optimize import brentq\n", + "\n", + "import parfun as pf\n", + "\n", + "np.random.seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "3374461b", + "metadata": {}, + "outputs": [], + "source": [ + "def colebrook_residual(f: float, Re: float, rel_roughness: float) -> float:\n", + " \"\"\"Returns g(f) = 0 at the true friction factor.\"\"\"\n", + " if f <= 0:\n", + " return np.inf\n", + " return 1.0 / np.sqrt(f) + 2.0 * np.log10(rel_roughness / 3.7 + 2.51 / (Re * np.sqrt(f)))\n", + "\n", + "\n", + "def solve_single_pipe(Re: float, rel_roughness: float, n_guesses: int = 20) -> Optional[float]:\n", + " \"\"\"Solve Colebrook-White for one pipe segment.\n", + "\n", + " Sweeps over n_guesses bracketed intervals and accepts the first convergent root.\n", + " Returns the friction factor f, or NaN if no solution found.\n", + " \"\"\"\n", + " f_candidates = np.logspace(np.log10(0.008), np.log10(0.1), n_guesses + 1)\n", + "\n", + " for i in range(len(f_candidates) - 1):\n", + " fa = f_candidates[i]\n", + " fb = f_candidates[i + 1]\n", + " ga = colebrook_residual(fa, Re, rel_roughness)\n", + " gb = colebrook_residual(fb, Re, rel_roughness)\n", + "\n", + " if np.isfinite(ga) and np.isfinite(gb) and ga * gb < 0:\n", + " try:\n", + " root = brentq(colebrook_residual, fa, fb,\n", + " args=(Re, rel_roughness), xtol=1e-10, maxiter=200)\n", + " return root\n", + " except Exception:\n", + " continue\n", + " return np.nan\n", + "\n", + "\n", + "def solve_network_serial(\n", + " re_list: List[float],\n", + " roughness_list: List[float],\n", + " n_guesses: int = 20,\n", + ") -> List[float]:\n", + " \"\"\"Solve Colebrook-White for every pipe segment -- serial version.\"\"\"\n", + " results = []\n", + " for Re, eps in zip(re_list, 
roughness_list):\n", + " f = solve_single_pipe(Re, eps, n_guesses)\n", + " results.append(f if f is not None else np.nan)\n", + " return results\n", + "\n", + "\n", + "@pf.parfun(\n", + " split=pf.per_argument(\n", + " re_list=pf.py_list.by_chunk,\n", + " roughness_list=pf.py_list.by_chunk,\n", + " ),\n", + " combine_with=pf.py_list.concat,\n", + " fixed_partition_size=125_000,\n", + ")\n", + "def solve_network_parallel(\n", + " re_list: List[float],\n", + " roughness_list: List[float],\n", + " n_guesses: int = 20,\n", + ") -> List[float]:\n", + " \"\"\"Solve Colebrook-White for every pipe segment -- parallel version.\"\"\"\n", + " results = []\n", + " for Re, eps in zip(re_list, roughness_list):\n", + " f = solve_single_pipe(Re, eps, n_guesses)\n", + " results.append(f if f is not None else np.nan)\n", + " return results" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2ed1f43f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pipeline network: 500,000 segments x 20 guesses = 10,000,000 solves\n", + "\n", + "Sequential: 67.13s\n", + "[INFO]2026-03-27 09:35:48+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:48+0800: ObjectStorageServer: start and listen to tcp://127.0.0.1:40359\n", + "[INFO]2026-03-27 09:35:48+0800: ObjectStorageServer: started\n", + "[INFO]2026-03-27 09:35:48+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:48+0800: use event loop: builtin\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: scheduler_address = tcp://127.0.0.1:52955\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: object_storage_address = tcp://127.0.0.1:40359\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: monitor_address = None\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: protected = True\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: max_number_of_tasks_waiting = -1\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: 
client_timeout_seconds = 60\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: worker_timeout_seconds = 60\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: object_retention_seconds = 60\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: load_balance_seconds = 1\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: load_balance_trigger_times = 2\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: event_loop = builtin\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: io_threads = 1\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: logging_config = LoggingConfig(paths=('/dev/stdout',), config_file=None, level='INFO')\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: policy = PolicyConfig(policy_engine_type='simple', policy_content='allocate=even_load; scaling=no')\n", + "[INFO]2026-03-27 09:35:48+0800: Scheduler: listen to scheduler address tcp://127.0.0.1:52955\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: updated `object_storage_address` from `tcp://127.0.0.1:40359` to `tcp://127.0.0.1:40359`\n", + "[INFO]2026-03-27 09:35:48+0800: Scheduler: connect to object storage server tcp://127.0.0.1:40359\n", + "[INFO]2026-03-27 09:35:48+0800: ConfigController: updated `monitor_address` from `None` to `tcp://127.0.0.1:52957`\n", + "[INFO]2026-03-27 09:35:48+0800: Scheduler: listen to scheduler monitor address tcp://127.0.0.1:52957\n", + "[INFO]2026-03-27 09:35:48+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 09:35:48+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 09:35:48+0800: VanillaGraphTaskController: started\n", + "[INFO]2026-03-27 09:35:48+0800: VanillaBalanceController: started\n", + "[INFO]2026-03-27 09:35:48+0800: VanillaClientController: started\n", + "[INFO]2026-03-27 09:35:48+0800: VanillaObjectController: started\n", + "[INFO]2026-03-27 09:35:48+0800: VanillaWorkerController: started\n", + "[INFO]2026-03-27 09:35:48+0800: WorkerManagerController: started\n", + 
"[INFO]2026-03-27 09:35:48+0800: VanillaInformationController: started\n", + "[INFO]2026-03-27 09:35:49+0800: ClientID(Client|8edee766b08440569636bbcf8d769937) connected\n", + "[INFO]2026-03-27 09:35:49+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:49+0800: use event loop: builtin\n", + "[INFO]2026-03-27 09:35:49+0800: WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9): start Processor[626541]\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: worker WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9) connected\n", + "[INFO]2026-03-27 09:35:49+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:49+0800: use event loop: builtin\n", + "[INFO]2026-03-27 09:35:49+0800: WorkerID(Worker|FIX|64ac81fe8874412691cfbf181a556269|2699f1e07e214b7dae1e4400686db4c5): start Processor[626544]\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: worker WorkerID(Worker|FIX|64ac81fe8874412691cfbf181a556269|2699f1e07e214b7dae1e4400686db4c5) connected\n", + 
"[INFO]2026-03-27 09:35:49+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:49+0800: use event loop: builtin\n", + "[INFO]2026-03-27 09:35:49+0800: WorkerID(Worker|FIX|4fc77069dec14ac8afdc9f25b775f589|5c1d80a2f7344efab1b3dc5871cc951b): start Processor[626547]\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: worker WorkerID(Worker|FIX|4fc77069dec14ac8afdc9f25b775f589|5c1d80a2f7344efab1b3dc5871cc951b) connected\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:49+0800: use event loop: builtin\n", + "[INFO]2026-03-27 09:35:49+0800: WorkerID(Worker|FIX|088dfcd5d42e49e69e8d2036de0294a1|b3fd1ec00cc34e6e81cf17db72594102): start Processor[626550]\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: PyAsyncObjectStorageConnector: started\n", + "[INFO]2026-03-27 09:35:49+0800: ZMQAsyncBinder: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaHeartbeatManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: worker WorkerID(Worker|FIX|088dfcd5d42e49e69e8d2036de0294a1|b3fd1ec00cc34e6e81cf17db72594102) connected\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTimeoutManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaTaskManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: VanillaProfilingManager: started\n", + "[INFO]2026-03-27 09:35:49+0800: balancing task: {WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9): 2}\n", + "[INFO]2026-03-27 
09:35:50+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:50+0800: Processor[626541] connecting to object storage at tcp://127.0.0.1:40359...\n", + "[INFO]2026-03-27 09:35:50+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:50+0800: Processor[626544] connecting to object storage at tcp://127.0.0.1:40359...\n", + "[INFO]2026-03-27 09:35:50+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:50+0800: Processor[626550] connecting to object storage at tcp://127.0.0.1:40359...\n", + "[INFO]2026-03-27 09:35:50+0800: logging to ('/dev/stdout',)\n", + "[INFO]2026-03-27 09:35:50+0800: Processor[626547] connecting to object storage at tcp://127.0.0.1:40359...\n", + "[INFO]2026-03-27 09:35:52+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 09:35:52+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 09:35:52+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 09:35:52+0800: Set up parallel backend: ScalerLocalBackend\n", + "[INFO]2026-03-27 09:36:24+0800: ClientID(Client|8edee766b08440569636bbcf8d769937) disconnected\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: 
VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 09:36:24+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaHeartbeatManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTimeoutManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaTaskManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: VanillaProfilingManager: exited\n", + "[INFO]2026-03-27 09:36:24+0800: WorkerID(Worker|FIX|088dfcd5d42e49e69e8d2036de0294a1|b3fd1ec00cc34e6e81cf17db72594102) disconnected\n", + "[INFO]2026-03-27 09:36:24+0800: WorkerID(Worker|FIX|64ac81fe8874412691cfbf181a556269|2699f1e07e214b7dae1e4400686db4c5) disconnected\n", + "[INFO]2026-03-27 09:36:24+0800: WorkerID(Worker|FIX|4fc77069dec14ac8afdc9f25b775f589|5c1d80a2f7344efab1b3dc5871cc951b) disconnected\n", + "[INFO]2026-03-27 09:36:24+0800: WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9) disconnected\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9): stop Processor[626541], reason: quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|088dfcd5d42e49e69e8d2036de0294a1|b3fd1ec00cc34e6e81cf17db72594102): stop Processor[626550], reason: quit\n", + "[INFO]2026-03-27 09:36:25+0800: 
WorkerID(Worker|FIX|088dfcd5d42e49e69e8d2036de0294a1|b3fd1ec00cc34e6e81cf17db72594102): quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|db330b97f28e4fa29e75f97bcfce96ee|6fabca49a77e415fafd3ae77651939b9): quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|64ac81fe8874412691cfbf181a556269|2699f1e07e214b7dae1e4400686db4c5): stop Processor[626544], reason: quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|64ac81fe8874412691cfbf181a556269|2699f1e07e214b7dae1e4400686db4c5): quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|4fc77069dec14ac8afdc9f25b775f589|5c1d80a2f7344efab1b3dc5871cc951b): stop Processor[626547], reason: quit\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerID(Worker|FIX|4fc77069dec14ac8afdc9f25b775f589|5c1d80a2f7344efab1b3dc5871cc951b): quit\n", + "[INFO]2026-03-27 09:36:25+0800: ZMQAsyncBinder: exited\n", + "[INFO]2026-03-27 09:36:25+0800: PyAsyncObjectStorageConnector: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaGraphTaskController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaBalanceController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaClientController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaObjectController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaWorkerController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: WorkerManagerController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: VanillaInformationController: exited\n", + "[INFO]2026-03-27 09:36:25+0800: ObjectStorageServer: stopped by user\n", + "Parallel: 37.53s\n", + "Speedup: 1.79x\n", + "\n", + "Max abs diff: 0.00e+00\n", + "Pipes solved: 500,000 / 500,000\n" + ] + } + ], + "source": [ + "N_PIPES = 500_000\n", + "N_GUESSES = 20\n", + "\n", + "re_values = np.random.uniform(4_000, 2_000_000, N_PIPES).tolist()\n", + "roughness_values = np.random.uniform(0.0001, 0.05, N_PIPES).tolist()\n", + "\n", + "print(f\"Pipeline network: {N_PIPES:,} segments x {N_GUESSES} guesses = {N_PIPES * 
N_GUESSES:,} solves\")\n", + "\n", + "start = time.time()\n", + "serial_results = solve_network_serial(re_values, roughness_values, N_GUESSES)\n", + "seq_time = time.time() - start\n", + "print(f\"\\nSequential: {seq_time:.2f}s\")\n", + "\n", + "start = time.time()\n", + "with pf.set_parallel_backend_context(\"scaler_local\", n_workers=4):\n", + " parallel_results = solve_network_parallel(re_values, roughness_values, N_GUESSES)\n", + "par_time = time.time() - start\n", + "print(f\"Parallel: {par_time:.2f}s\")\n", + "print(f\"Speedup: {seq_time / par_time:.2f}x\")\n", + "\n", + "serial_arr = np.array(serial_results)\n", + "parallel_arr = np.array(parallel_results)\n", + "max_diff = np.nanmax(np.abs(serial_arr - parallel_arr))\n", + "print(f\"\\nMax abs diff: {max_diff:.2e}\")\n", + "n_solved = np.sum(~np.isnan(serial_arr))\n", + "print(f\"Pipes solved: {n_solved:,} / {N_PIPES:,}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.14.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}