diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 580e5c1ae..7e9d9ca52 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -23,52 +23,13 @@ jobs: # Checkout the WecOptTool repo - uses: actions/checkout@v3 - # Cache the conda environment >>> - # The cache key includes the OS, the date, the hash of the pyproject.toml file, and the cache number. - # A new cache is created if: - # - this is the first time today that this file is run (date changes) - # - the content of pyproject.toml changes - # - you manually change the value of the CACHE_NUMBER below - # Else the existing cache is used. + # Caching of environment has been removed for simplicity - name: Setup Miniforge uses: conda-incubator/setup-miniconda@v2 with: miniforge-variant: Miniforge3 miniforge-version: latest activate-environment: test-env - use-mamba: true - - # save the date to include in the cache key - - name: Get Date - id: get-date - run: echo "DATE=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_ENV - shell: bash - - # create a conda yaml file - - name: Create environment.yml file - run: | - echo "name: test-env" >> environment.yml; - echo " " >> environment.yml - echo "dependencies:" >> environment.yml - echo " - python=${{ matrix.python-version }}" >> environment.yml - echo " - capytaine" >> environment.yml - echo " - wavespectra" >> environment.yml - cat environment.yml - - # use the cache if it exists - - uses: actions/cache@v3 - env: - CACHE_NUMBER: 0 # increase to reset cache manually - with: - path: ${{ env.CONDA }}/envs - key: conda-${{ runner.os }}--${{ runner.arch }}--${{ env.DATE }}-${{ hashFiles('pyproject.toml') }}-${{ env.CACHE_NUMBER }} - id: cache - - # if cache key has changed, create new cache - - name: Update environment - run: mamba env update -n test-env -f environment.yml - if: steps.cache.outputs.cache-hit != 'true' - # <<< Cache the conda environment # install libglu on ubuntu. - name: Install libglu @@ -81,13 +42,13 @@ jobs: run: | conda activate test-env python3 -m pip install --upgrade pip - pip3 install gmsh pygmsh coveralls pytest + pip3 install gmsh pygmsh coveralls pytest pytest-cov pip3 install . # run all tests & coverage - name: Run Test shell: bash -l {0} - run: coverage run -m pytest + run: pytest --cov=wecopttool --cov-report=xml # upload coverage data - name: Upload coverage data to coveralls.io diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 5eaf1e45a..c629bc69e 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -23,52 +23,12 @@ jobs: # Checkout the WecOptTool repo - uses: actions/checkout@v3 - # Cache the conda environment >>> - # The cache key includes the OS, the date, the hash of the pyproject.toml file, and the cache number. - # A new cache is created if: - # - this is the first time today that this file is run (date changes) - # - the content of pyproject.toml changes - # - you manually change the value of the CACHE_NUMBER below - # Else the existing cache is used. 
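Note on the workflow changes above and below: coverage is now collected by pytest-cov in a single step instead of a separate "coverage run -m pytest" call, and the conda environment cache has been dropped. The same coverage run can be reproduced locally; a minimal sketch (not part of this diff), assuming pytest and pytest-cov are installed as in the workflow's pip step:

    # Run the test suite with coverage collected for the wecopttool package,
    # writing the coverage.xml report that the coveralls upload step consumes.
    import pytest

    exit_code = pytest.main(["--cov=wecopttool", "--cov-report=xml"])
    raise SystemExit(exit_code)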
- name: Setup Miniforge uses: conda-incubator/setup-miniconda@v2 with: miniforge-variant: Miniforge3 miniforge-version: latest activate-environment: test-env - use-mamba: true - - # save the date to include in the cache key - - name: Get Date - id: get-date - run: echo "DATE=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_ENV - shell: bash - - # create a conda yaml file - - name: Create environment.yml file - run: | - echo "name: test-env" >> environment.yml; - echo " " >> environment.yml - echo "dependencies:" >> environment.yml - echo " - python=${{ matrix.python-version }}" >> environment.yml - echo " - capytaine" >> environment.yml - echo " - wavespectra" >> environment.yml - cat environment.yml - - # use the cache if it exists - - uses: actions/cache@v3 - env: - CACHE_NUMBER: 0 # increase to reset cache manually - with: - path: ${{ env.CONDA }}/envs - key: conda-${{ runner.os }}--${{ runner.arch }}--${{ env.DATE }}-${{ hashFiles('pyproject.toml') }}-${{ env.CACHE_NUMBER }} - id: cache - - # if cache key has changed, create new cache - - name: Update environment - run: mamba env update -n test-env -f environment.yml - if: steps.cache.outputs.cache-hit != 'true' - # <<< Cache the conda environment # install libglu on ubuntu. - name: Install libglu @@ -81,13 +41,13 @@ jobs: run: | conda activate test-env python3 -m pip install --upgrade pip - pip3 install gmsh pygmsh coveralls pytest + pip3 install gmsh pygmsh coveralls pytest pytest-cov pip3 install . # run all tests & coverage - name: Run Test shell: bash -l {0} - run: coverage run -m pytest + run: pytest --cov=wecopttool --cov-report=xml # upload coverage data - name: Upload coverage data to coveralls.io diff --git a/examples/tutorial_1_WaveBot.ipynb b/examples/tutorial_1_WaveBot.ipynb index 3a90c6bd9..5b6cda6df 100644 --- a/examples/tutorial_1_WaveBot.ipynb +++ b/examples/tutorial_1_WaveBot.ipynb @@ -30,7 +30,8 @@ "metadata": {}, "outputs": [], "source": [ - "import autograd.numpy as np\n", + "import numpy as np\n", + "import jax.numpy as jnp\n", "import capytaine as cpy\n", "from capytaine.io.meshio import load_from_meshio\n", "import matplotlib.pyplot as plt\n", @@ -372,7 +373,7 @@ "\n", "def const_f_pto(wec, x_wec, x_opt, wave): # Format for scipy.optimize.minimize\n", " f = pto.force(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return f_max - np.abs(f.flatten())\n", + " return f_max - jnp.abs(f.flatten())\n", "\n", "ineq_cons = {'type': 'ineq',\n", " 'fun': const_f_pto,\n", @@ -706,7 +707,7 @@ "## Update PTO constraints and forcing\n", "def const_f_pto_2(wec, x_wec, x_opt, wave):\n", " f = pto_2.force_on_wec(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return f_max - np.abs(f.flatten())\n", + " return f_max - jnp.abs(f.flatten())\n", "ineq_cons_2 = {'type': 'ineq', 'fun': const_f_pto_2}\n", "constraints_2 = [ineq_cons_2]\n", "f_add_2 = {'PTO': pto_2.force_on_wec}" @@ -989,7 +990,7 @@ "\n", " def const_f_pto(wec, x_wec, x_opt, wave):\n", " f = pto.force(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return f_max - np.abs(f.flatten())\n", + " return f_max - jnp.abs(f.flatten())\n", "\n", " ineq_cons = {'type': 'ineq', 'fun': const_f_pto}\n", " constraints = [ineq_cons]\n", diff --git a/examples/tutorial_2_AquaHarmonics.ipynb b/examples/tutorial_2_AquaHarmonics.ipynb index f8c704ea3..d2b1a009c 100644 --- a/examples/tutorial_2_AquaHarmonics.ipynb +++ b/examples/tutorial_2_AquaHarmonics.ipynb @@ -24,7 +24,8 @@ "source": [ "import capytaine as cpy\n", "from capytaine.io.meshio import load_from_meshio\n", - "import autograd.numpy as np\n", + "import 
numpy as np\n", + "import jax.numpy as jnp\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", "from scipy.optimize import brute\n", @@ -376,10 +377,10 @@ "source": [ "def f_buoyancy(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " \"\"\"Only the zeroth order component (doesn't include linear stiffness)\"\"\"\n", - " return displacement * rho * g * np.ones([wec.ncomponents*nsubsteps, wec.ndof])\n", + " return displacement * rho * g * jnp.ones([wec.ncomponents*nsubsteps, wec.ndof])\n", "\n", "def f_gravity(wec, x_wec, x_opt, wave, nsubsteps=1):\n", - " return -1 * wec.inertia_matrix.item() * g * np.ones([wec.ncomponents*nsubsteps, wec.ndof])\n", + " return -1 * wec.inertia_matrix.item() * g * jnp.ones([wec.ncomponents*nsubsteps, wec.ndof])\n", "\n", "def f_pretension_wec(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " \"\"\"Pretension force as it acts on the WEC\"\"\"\n", @@ -389,18 +390,18 @@ "\n", "def f_pto_passive(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " pos = wec.vec_to_dofmat(x_wec)\n", - " vel = np.dot(wec.derivative_mat,pos)\n", - " acc = np.dot(wec.derivative_mat, vel)\n", + " vel = jnp.dot(wec.derivative_mat,pos)\n", + " acc = jnp.dot(wec.derivative_mat, vel)\n", " time_matrix = wec.time_mat_nsubsteps(nsubsteps)\n", " spring = -(gear_ratios['spring']*airspring['gamma']*airspring['area']*\n", " airspring['press_init']/airspring['vol_init']) * pos\n", - " f_spring = np.dot(time_matrix,spring)\n", + " f_spring = jnp.dot(time_matrix,spring)\n", " fric = -(friction_pto + \n", " friction['Bpneumatic_spring_static1']*\n", " gear_ratios['spring']) * vel\n", - " f_fric = np.dot(time_matrix,fric)\n", + " f_fric = jnp.dot(time_matrix,fric)\n", " inertia = inertia_pto * acc\n", - " f_inertia = np.dot(time_matrix,inertia)\n", + " f_inertia = jnp.dot(time_matrix,inertia)\n", " return f_spring + f_fric + f_inertia\n", "\n", "def f_pto_line(wec, x_wec, x_opt, wave, nsubsteps=1):\n", @@ -452,18 +453,18 @@ "\n", "def const_peak_torque_pto(wec, x_wec, x_opt, wave):\n", " torque = pto.force(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return torque_peak_max - np.abs(torque.flatten())\n", + " return torque_peak_max - jnp.abs(torque.flatten())\n", "\n", "def const_speed_pto(wec, x_wec, x_opt, wave):\n", " rot_vel = pto.velocity(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return rot_speed_max - np.abs(rot_vel.flatten())\n", + " return rot_speed_max - jnp.abs(rot_vel.flatten())\n", "\n", "def const_power_pto(wec, x_wec, x_opt, wave):\n", " power_mech = (\n", " pto.velocity(wec, x_wec, x_opt, wave, nsubsteps) *\n", " pto.force(wec, x_wec, x_opt, wave, nsubsteps)\n", " )\n", - " return power_max - np.abs(power_mech.flatten())\n", + " return power_max - jnp.abs(power_mech.flatten())\n", "\n", "def constrain_min_tension(wec, x_wec, x_opt, wave):\n", " total_tension = -1*f_pto_line(wec, x_wec, x_opt, wave, nsubsteps)\n", diff --git a/examples/tutorial_3_LUPA.ipynb b/examples/tutorial_3_LUPA.ipynb index 66dc4e675..a9a93dbce 100644 --- a/examples/tutorial_3_LUPA.ipynb +++ b/examples/tutorial_3_LUPA.ipynb @@ -40,7 +40,8 @@ "import gmsh, pygmsh\n", "import capytaine as cpy\n", "from capytaine.io.meshio import load_from_meshio\n", - "import autograd.numpy as np\n", + "import numpy as np\n", + "import jax.numpy as jnp\n", "import matplotlib.pyplot as plt\n", "import xarray as xr\n", "from scipy.optimize import brute\n", @@ -766,8 +767,8 @@ "# maximum stroke\n", "stroke_max = 0.5 # m\n", "def const_stroke_pto(wec, x_wec, x_opt, wave): \n", - " pos = pto.position(wec, x_wec, x_opt, wave, 
nsubsteps)\n", - " return stroke_max - np.abs(pos.flatten())\n", + " pos = pto.position(wec, x_wec, x_opt, waves, nsubsteps)\n", + " return stroke_max - jnp.abs(pos.flatten())\n", "\n", "## GENERATOR\n", "# peak torque\n", @@ -776,20 +777,20 @@ " \"\"\"Instantaneous torque must not exceed max torque Tmax - |T| >=0 \n", " \"\"\"\n", " torque = pto.force(wec, x_wec, x_opt, wave, nsubsteps) / gear_ratio(radius)\n", - " return generator['max_torque'] - np.abs(torque.flatten())\n", + " return generator['max_torque'] - jnp.abs(torque.flatten())\n", "\n", "# continuous torque\n", "def const_torque_pto(wec, x_wec, x_opt, wave, radius=default_radius): \n", " \"\"\"RMS torque must not exceed max continous torque \n", " Tmax_conti - Trms >=0 \"\"\"\n", " torque = pto.force(wec, x_wec, x_opt, wave, nsubsteps) / gear_ratio(radius)\n", - " torque_rms = np.sqrt(np.mean(torque.flatten()**2))\n", + " torque_rms = jnp.sqrt(jnp.mean(torque.flatten()**2))\n", " return generator['continuous_torque'] - torque_rms\n", "\n", "# max speed\n", "def const_speed_pto(wec, x_wec, x_opt, wave, radius=default_radius): \n", " rot_vel = pto.velocity(wec, x_wec, x_opt, wave, nsubsteps) * gear_ratio(radius)\n", - " return generator['max_speed'] - np.abs(rot_vel.flatten())\n", + " return generator['max_speed'] - jnp.abs(rot_vel.flatten())\n", "\n", "## Constraints\n", "constraints = [\n", @@ -1263,7 +1264,7 @@ " stroke_max = 0.5 # m\n", " def const_stroke_pto(wec, x_wec, x_opt, wave): \n", " pos = pto.position(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return stroke_max - np.abs(pos.flatten())\n", + " return stroke_max - jnp.abs(pos.flatten())\n", "\n", " ## GENERATOR\n", " # peak torque\n", @@ -1272,20 +1273,20 @@ " \"\"\"Instantaneous torque must not exceed max torque Tmax - |T| >=0 \n", " \"\"\"\n", " torque = pto.force(wec, x_wec, x_opt, wave, nsubsteps) / gear_ratio(radius)\n", - " return generator['max_torque'] - np.abs(torque.flatten())\n", + " return generator['max_torque'] - jnp.abs(torque.flatten())\n", "\n", " # continuous torque\n", " def const_torque_pto(wec, x_wec, x_opt, wave): \n", " \"\"\"RMS torque must not exceed max continous torque \n", " Tmax_conti - Trms >=0 \"\"\"\n", " torque = pto.force(wec, x_wec, x_opt, wave, nsubsteps) / gear_ratio(radius)\n", - " torque_rms = np.sqrt(np.mean(torque.flatten()**2))\n", + " torque_rms = jnp.sqrt(jnp.mean(torque.flatten()**2))\n", " return generator['continuous_torque'] - torque_rms\n", "\n", " # max speed\n", " def const_speed_pto(wec, x_wec, x_opt, wave): \n", " rot_vel = pto.velocity(wec, x_wec, x_opt, wave, nsubsteps) * gear_ratio(radius)\n", - " return generator['max_speed'] - np.abs(rot_vel.flatten())\n", + " return generator['max_speed'] - jnp.abs(rot_vel.flatten())\n", "\n", " ## Constraints\n", " constraints = [\n", diff --git a/examples/tutorial_4_Pioneer.ipynb b/examples/tutorial_4_Pioneer.ipynb index 1a00cb33f..96a065370 100644 --- a/examples/tutorial_4_Pioneer.ipynb +++ b/examples/tutorial_4_Pioneer.ipynb @@ -36,7 +36,8 @@ "source": [ "import capytaine as cpy\n", "from capytaine.io.meshio import load_from_meshio\n", - "import autograd.numpy as np\n", + "import numpy as np\n", + "import jax.numpy as jnp\n", "import matplotlib.pyplot as plt\n", "from scipy.linalg import block_diag\n", "import xarray as xr\n", @@ -445,13 +446,13 @@ "def rel_position(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " pos_rel = x_rel(wec, x_wec, x_opt)\n", " time_matrix = wec.time_mat_nsubsteps(nsubsteps)\n", - " return np.dot(time_matrix, pos_rel)\n", + " return 
jnp.dot(time_matrix, pos_rel)\n", "\n", "def rel_velocity(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " pos_rel = x_rel(wec, x_wec, x_opt)\n", - " vel_rel = np.dot(wec.derivative_mat, pos_rel)\n", + " vel_rel = jnp.dot(wec.derivative_mat, pos_rel)\n", " time_matrix = wec.time_mat_nsubsteps(nsubsteps)\n", - " return np.dot(time_matrix, vel_rel)" + " return jnp.dot(time_matrix, vel_rel)" ] }, { @@ -469,9 +470,9 @@ "outputs": [], "source": [ "def force_from_generator(wec, x_wec, x_opt, wave=None, nsubsteps=1):\n", - " f_fd = np.reshape(x_opt[:nstate_pto], (-1, ndof), order='F')\n", + " f_fd = jnp.reshape(x_opt[:nstate_pto], (-1, ndof), order='F')\n", " time_matrix = wec.time_mat_nsubsteps(nsubsteps)\n", - " torque = np.dot(time_matrix, f_fd) * flywheel_properties['motor_gear_ratio']\n", + " torque = jnp.dot(time_matrix, f_fd) * flywheel_properties['motor_gear_ratio']\n", " return torque" ] }, @@ -535,20 +536,20 @@ " e1_td = force_from_generator(wec, x_wec, x_opt, wave)\n", " q1 = wot.complex_to_real(wec.td_to_fd(q1_td, False))\n", " e1 = wot.complex_to_real(wec.td_to_fd(e1_td, False))\n", - " vars_1 = np.hstack([q1, e1])\n", + " vars_1 = jnp.hstack([q1, e1])\n", " vars_1_flat = wec.dofmat_to_vec(vars_1)\n", - " vars_2_flat = np.dot(pto._transfer_mat, vars_1_flat)\n", + " vars_2_flat = jnp.dot(pto._transfer_mat, vars_1_flat)\n", " vars_2 = wot.vec_to_dofmat(vars_2_flat, 2)\n", " q2 = vars_2[:, 0]\n", " e2 = vars_2[:, 1]\n", " time_mat = wec.time_mat_nsubsteps(nsubsteps)\n", - " q2_td = np.dot(time_mat, q2)\n", - " e2_td = np.dot(time_mat, e2)\n", + " q2_td = jnp.dot(time_mat, q2)\n", + " e2_td = jnp.dot(time_mat, e2)\n", " return q2_td * e2_td\n", "\n", "def energy(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " power_td = electrical_power(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return np.sum(power_td) * wec.dt/nsubsteps\n", + " return jnp.sum(power_td) * wec.dt/nsubsteps\n", "\n", "def average_electrical_power(wec, x_wec, x_opt, wave, nsubsteps=1):\n", " e = energy(wec, x_wec, x_opt, wave, nsubsteps)\n", @@ -577,7 +578,7 @@ "\n", "def constraint_max_generator_torque(wec, x_wec, x_opt, wave, nsubsteps=nsubsteps_constraints):\n", " torque = force_from_generator(wec, x_wec, x_opt, wave, nsubsteps)\n", - " return max_generator_torque - np.abs(torque.flatten())" + " return max_generator_torque - jnp.abs(torque.flatten())" ] }, { @@ -610,7 +611,7 @@ "def force_from_friction(wec, x_wec, x_opt, wave = None, nsubsteps = 1):\n", " rel_vel = rel_velocity(wec, x_wec, x_opt, wave, nsubsteps) * flywheel_properties['motor_gear_ratio']\n", " fric = -1*(\n", - " np.tanh(rel_vel)*flywheel_properties['coulomb_friction'] +\n", + " jnp.tanh(rel_vel)*flywheel_properties['coulomb_friction'] +\n", " rel_vel*flywheel_properties['viscous_friction']\n", " ) * flywheel_properties['motor_gear_ratio']\n", " return fric\n", @@ -624,7 +625,7 @@ "\n", "def nonlinear_spring(pos):\n", " # 135 deg nonlinear spring\n", - " spring_eq_pos_td = pos - np.pi\n", + " spring_eq_pos_td = pos - jnp.pi\n", " n = 12\n", " slope = 1/(2**(2*n))*comb(2*n,n)\n", " scale = 1/slope\n", @@ -632,7 +633,7 @@ " for ind in range(n):\n", " k = ind+1\n", " coeffs = comb(2*n, n-k)/(k*(2**(2*n-1)))\n", - " new_pos = new_pos - coeffs*np.sin(k*spring_eq_pos_td)\n", + " new_pos = new_pos - coeffs*jnp.sin(k*spring_eq_pos_td)\n", " return spring_properties['gear_ratio'] * -spring_properties['stiffness'] * scale * new_pos\n", "\n", "def force_from_nl_spring(wec, x_wec, x_opt, wave = None, nsubsteps = 1):\n", @@ -676,9 +677,9 @@ "source": [ "def 
flywheel_inertia(wec, x_wec, x_opt, wave = None, nsubsteps = 1):\n", " pos_fw = wec.vec_to_dofmat(x_opt[nstate_pto:])\n", - " acc_fw = np.dot(wec.derivative2_mat, pos_fw)\n", + " acc_fw = jnp.dot(wec.derivative2_mat, pos_fw)\n", " time_matrix = wec.time_mat_nsubsteps(nsubsteps)\n", - " acc_fw = np.dot(time_matrix, acc_fw)\n", + " acc_fw = jnp.dot(time_matrix, acc_fw)\n", " return flywheel_properties['MOI'] * acc_fw\n", "\n", "def flywheel_residual_lin_spring(wec, x_wec, x_opt, wave = None, nsubsteps = 1):\n", @@ -1072,7 +1073,7 @@ "outputs": [], "source": [ "import logging \n", - "logging.getLogger().setLevel(logging.DEBUG)\n", + "logging.getLogger().setLevel(logging.INFO)\n", "\n", "obj_fun = average_electrical_power\n", "results = wec_lin.solve(\n", diff --git a/pyproject.toml b/pyproject.toml index ccaa7e973..521bcc987 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ "numpy>=1.20, <2.0", "scipy", "xarray", - "autograd", + "jax", "capytaine", "joblib", "wavespectra>=4.0", diff --git a/tests/test_core.py b/tests/test_core.py index 5146597e9..c37e0eed6 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -641,7 +641,7 @@ def test_error(self, vec): """Test that the function raises a ValueError if an incompatible number of degrees of freedom are specified. """ - with pytest.raises(ValueError): + with pytest.raises(Exception): wot.vec_to_dofmat(vec, 4) @@ -1061,7 +1061,7 @@ def omega(self, f1, nfreq_imp): @pytest.fixture(scope="class") def x_wec(self,): """WEC position state vector for a simple synthetic case.""" - return [0, 1, 1, 1, 0, 2, 2, 2] + return np.array([0, 1, 1, 1, 0, 2, 2, 2]) @pytest.fixture(scope="class") def force(self, f1, nfreq_imp, ndof_imp): diff --git a/tests/test_integration.py b/tests/test_integration.py index 7d33f410a..ff162b19a 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -5,7 +5,8 @@ import wecopttool as wot import capytaine as cpy from capytaine.io.meshio import load_from_meshio -import autograd.numpy as np +import numpy as np +import jax.numpy as jnp from scipy.optimize import Bounds import xarray as xr from wavespectra.construct.frequency import pierson_moskowitz @@ -302,7 +303,7 @@ def test_p_controller_resonant_wave(self, scale_x_opt=1e-3, scale_obj=1e-1, optim_options={'ftol': 1e-10}, - bounds_opt=((-1*np.infty, 0),), + bounds_opt=((-1*np.inf, 0),), ) power_sol = -1*res[0]['fun'] @@ -399,7 +400,7 @@ def test_saturated_pi_controller(self, def const_f_pto(wec, x_wec, x_opt, wave): f = pto['us'].force_on_wec(wec, x_wec, x_opt, wave, nsubsteps=4) - return f_max - np.abs(f.flatten()) + return f_max - jnp.abs(f.flatten()) wec['us'] = wot.WEC.from_bem(hydro_data, f_add={"PTO": pto['us'].force_on_wec}, constraints=[{'type': 'ineq', diff --git a/wecopttool/controllers.py b/wecopttool/controllers.py index 64d39eae4..42b3ff978 100644 --- a/wecopttool/controllers.py +++ b/wecopttool/controllers.py @@ -18,9 +18,9 @@ import logging from typing import Optional, TypeVar, Callable, Union -import autograd.numpy as np -from autograd.builtins import isinstance, tuple, list, dict -from autograd.numpy import ndarray +import numpy as np +import jax.numpy as jnp +from numpy import ndarray from scipy.linalg import block_diag from scipy.optimize import OptimizeResult from xarray import DataArray, Dataset @@ -100,21 +100,21 @@ def _gains(self, x_opt): ndof = self.ndof if self.proportional: - gain_p = np.diag(x_opt[idx*ndof:(idx+1)*ndof]) + gain_p = jnp.diag(jnp.array(x_opt[idx*ndof:(idx+1)*ndof])) idx = idx + 1 else: 
- gain_p = np.zeros([ndof, ndof]) + gain_p = jnp.zeros([ndof, ndof]) if self.integral: - gain_i = np.diag(x_opt[idx*ndof:(idx+1)*ndof]) + gain_i = jnp.diag(jnp.array(x_opt[idx*ndof:(idx+1)*ndof])) idx = idx + 1 else: - gain_i = np.zeros([ndof, ndof]) + gain_i = jnp.zeros([ndof, ndof]) if self.derivative: - gain_d = np.diag(x_opt[idx*ndof:(idx+1)*ndof]) + gain_d = jnp.diag(jnp.array(x_opt[idx*ndof:(idx+1)*ndof])) else: - gain_d = np.zeros([ndof, ndof]) + gain_d = jnp.zeros([ndof, ndof]) return gain_p, gain_i, gain_d @@ -155,9 +155,9 @@ def force(self, acc_td = pto.acceleration(wec, x_wec, x_opt, wave, nsubsteps) force_td = ( - np.dot(vel_td, gain_p.T) + - np.dot(pos_td, gain_i.T) + - np.dot(acc_td, gain_d.T) + jnp.dot(vel_td, gain_p.T) + + jnp.dot(pos_td, gain_i.T) + + jnp.dot(acc_td, gain_d.T) ) if self.saturation is not None: @@ -167,7 +167,7 @@ def force(self, def _apply_saturation(self, force_td): if self.saturation is not None: - saturation = np.atleast_2d(np.squeeze(self.saturation)) + saturation = jnp.atleast_2d(jnp.squeeze(jnp.array(self.saturation))) assert len(saturation)==self.ndof if len(saturation.shape) > 2: raise ValueError("`saturation` must have <= 2 dimensions.") @@ -181,9 +181,9 @@ def _apply_saturation(self, force_td): force_td_list = [] for i in range(self.ndof): - tmp = np.clip(force_td[:,i], f_min[i], f_max[i]) + tmp = jnp.clip(force_td[:,i], f_min[i], f_max[i]) force_td_list.append(tmp) - force_td = np.array(force_td_list).T + force_td = jnp.array(force_td_list).T return force_td @@ -223,8 +223,8 @@ def force(self, length. """ tmat = pto._tmat(wec, nsubsteps) - x_opt = np.reshape(x_opt[:len(tmat[0])*pto.ndof], (-1, pto.ndof), order='F') - return np.dot(tmat, x_opt) + x_opt = jnp.reshape(jnp.array(x_opt[:len(tmat[0])*pto.ndof]), (-1, pto.ndof), order='F') + return jnp.dot(tmat, x_opt) # utilities def nstate_unstructured(nfreq: int, ndof: int) -> int: diff --git a/wecopttool/core.py b/wecopttool/core.py index 43e7b15dc..5702a36a5 100644 --- a/wecopttool/core.py +++ b/wecopttool/core.py @@ -51,6 +51,7 @@ "frequency_parameters", "time_results", "set_fb_centers", + "block_diag_jax", ] @@ -61,16 +62,17 @@ from datetime import datetime from numpy.typing import ArrayLike -import autograd.numpy as np -from autograd.numpy import ndarray -from autograd.builtins import isinstance, tuple, list, dict -from autograd import grad, jacobian +import numpy as np +import jax.numpy as jnp +from numpy import ndarray +import jax import xarray as xr from xarray import DataArray, Dataset import capytaine as cpy from scipy.optimize import minimize, OptimizeResult, Bounds -from scipy.linalg import block_diag, dft +from scipy.linalg import dft +jax.config.update("jax_enable_x64", True) # logger _log = logging.getLogger(__name__) @@ -80,7 +82,7 @@ warnings.filterwarnings("ignore", message=filter_msg) # default values -_default_parameters = {'rho': 1025.0, 'g': 9.81, 'depth': np.infty} +_default_parameters = {'rho': 1025.0, 'g': 9.81, 'depth': np.inf} _default_min_damping = 1e-6 # type aliases @@ -654,7 +656,7 @@ def solve(self, See :py:func:`scipy.optimize.minimize`. use_grad If :python:`True`, optimization will utilize - `autograd `_ + `jax `_ for gradients. maximize Whether to maximize the objective function. 
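The wecopttool/core.py changes in this region replace autograd's grad/jacobian with jax.grad/jax.jacobian (wrapped in jax.jit) and enable 64-bit floats at import time. An illustrative sketch of that pattern with a stand-in objective function (not code from this diff):

    import jax
    import jax.numpy as jnp

    # JAX defaults to float32; scipy's SLSQP works in double precision,
    # hence the jax_enable_x64 flag set at import time in wecopttool/core.py.
    jax.config.update("jax_enable_x64", True)

    def objective(x):
        # stand-in for a scaled WecOptTool objective function
        return jnp.sum(x**2 * jnp.cos(x))

    obj_grad = jax.jit(jax.grad(objective))      # replaces autograd.grad
    obj_jac = jax.jit(jax.jacobian(objective))   # replaces autograd.jacobian
    print(obj_grad(jnp.ones(4)))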
@@ -723,14 +725,15 @@ def solve(self, scale_x_opt = scale_dofs([scale_x_opt], nstate_opt) # composite scaling vector - scale = np.concatenate([scale_x_wec, scale_x_opt]) + scale = jnp.concatenate([jnp.array(scale_x_wec), jnp.array(scale_x_opt)]) # decision variable initial guess + key = jax.random.PRNGKey(0) # could add key as input to select same initial guesses? if x_wec_0 is None: - x_wec_0 = np.random.randn(self.nstate_wec) + x_wec_0 = jax.random.normal(key, [self.nstate_wec], dtype=np.float64) if x_opt_0 is None: - x_opt_0 = np.random.randn(nstate_opt) - x0 = np.concatenate([x_wec_0, x_opt_0])*scale + x_opt_0 = jax.random.normal(key, [nstate_opt], dtype=np.float64) + x0 = jnp.concatenate([jnp.array(x_wec_0), jnp.array(x_opt_0)])*scale # bounds if (bounds_wec is None) and (bounds_opt is None): @@ -741,8 +744,8 @@ def solve(self, if isinstance(bii, tuple): bounds_in[idx] = Bounds(lb=[xibs[0] for xibs in bii], ub=[xibs[1] for xibs in bii]) - inf_wec = np.ones(self.nstate_wec)*np.inf - inf_opt = np.ones(nstate_opt)*np.inf + inf_wec = jnp.ones(self.nstate_wec)*jnp.inf + inf_opt = jnp.ones(nstate_opt)*jnp.inf bounds_dflt = [Bounds(lb=-inf_wec, ub=inf_wec), Bounds(lb=-inf_opt, ub=inf_opt)] bounds_list = [] @@ -752,8 +755,8 @@ def solve(self, else: bo = bd bounds_list.append(bo) - bounds = Bounds(lb=np.hstack([le.lb for le in bounds_list])*scale, - ub=np.hstack([le.ub for le in bounds_list])*scale) + bounds = Bounds(lb=jnp.hstack([le.lb for le in bounds_list])*scale, + ub=jnp.hstack([le.ub for le in bounds_list])*scale) for realization, wave in waves.groupby('realization'): @@ -784,9 +787,9 @@ def new_fun(x): return icons["fun"](self, x_wec, x_opt, wave) return new_fun - icons_new["fun"] = make_new_fun(icons) + icons_new["fun"] = jax.jit(make_new_fun(icons)) if use_grad: - icons_new['jac'] = jacobian(icons_new['fun']) + icons_new['jac'] = jax.jit(jax.jacobian(icons_new['fun'])) constraints[i] = icons_new # system dynamics through equality constraint, ma - Σf = 0 @@ -795,9 +798,9 @@ def scaled_resid_fun(x): x_wec, x_opt = self.decompose_state(x_s) return self.residual(x_wec, x_opt, wave) - eq_cons = {'type': 'eq', 'fun': scaled_resid_fun} + eq_cons = {'type': 'eq', 'fun': jax.jit(scaled_resid_fun)} if use_grad: - eq_cons['jac'] = jacobian(scaled_resid_fun) + eq_cons['jac'] = jax.jit(jax.jacobian(scaled_resid_fun)) constraints.append(eq_cons) # callback @@ -817,7 +820,7 @@ def callback_scipy(x): # optimization problem optim_options['disp'] = optim_options.get('disp', True) - problem = {'fun': obj_fun_scaled, + problem = {'fun': jax.jit(obj_fun_scaled), 'x0': x0, 'method': 'SLSQP', 'constraints': constraints, @@ -826,7 +829,7 @@ def callback_scipy(x): 'callback': callback_scipy, } if use_grad: - problem['jac'] = grad(obj_fun_scaled) + problem['jac'] = jax.jit(jax.grad(obj_fun_scaled)) # minimize optim_res = minimize(**problem) @@ -1066,7 +1069,7 @@ def omega(self) -> ndarray: @property def period(self) -> ndarray: """Period vector [s].""" - return np.concatenate([[np.Infinity], 1/self._freq[1:]]) + return np.concatenate(([np.inf], 1/self._freq[1:])) @property def w1(self) -> float: @@ -1430,6 +1433,9 @@ def time_mat( wt = np.outer(t, omega[1:]) ncomp = ncomponents(nfreq) time_mat = np.empty((nsubsteps*ncomp, ncomp)) + #time_mat = time_mat.at[:, 0].set(1.0) + #time_mat = time_mat.at[:, 1::2].set(np.cos(wt)) + #time_mat = time_mat.at[:, 2::2].set(-np.sin(wt[:, :-1])) # remove 2pt wave sine component time_mat[:, 0] = 1.0 time_mat[:, 1::2] = np.cos(wt) time_mat[:, 2::2] = -np.sin(wt[:, :-1]) # 
remove 2pt wave sine component @@ -1471,7 +1477,7 @@ def block(n): return np.array([[0, -1], [1, 0]]) * n*f1 * 2*np.pi blocks = [block(n+1) for n in range(nfreq)] if zero_freq: blocks = [0.0] + blocks - deriv_mat = block_diag(*blocks) + deriv_mat = block_diag_jax(*blocks) return deriv_mat[:-1, :-1] # remove 2pt wave sine component @@ -1505,7 +1511,7 @@ def derivative2_mat( Whether the first frequency should be zero. """ vals = [((n+1)*f1 * 2*np.pi)**2 for n in range(nfreq)] - diagonal = np.repeat(-np.ones(nfreq) * vals, 2)[:-1] # remove 2pt wave sine + diagonal = np.repeat(-np.ones(nfreq) * np.array(vals), 2)[:-1] # remove 2pt wave sine if zero_freq: diagonal = np.concatenate(([0.0], diagonal)) return np.diag(diagonal) @@ -1543,26 +1549,28 @@ def mimo_transfer_mat( zero_freq Whether the first frequency should be zero. """ + #if not isinstance(transfer_mat, jnp.ndarray): + # transfer_mat = transfer_mat.values ndof = transfer_mat.shape[1] assert transfer_mat.shape[2] == ndof elem = [[None]*ndof for _ in range(ndof)] - def block(re, im): return np.array([[re, -im], [im, re]]) + def block(re, im): return jnp.array([[re, -im], [im, re]]) for idof in range(ndof): for jdof in range(ndof): if zero_freq: Zp0 = transfer_mat[0, idof, jdof] - assert np.all(np.isreal(Zp0)) - Zp0 = np.real(Zp0) + #assert jnp.all(jnp.isreal(jnp.array(Zp0))) + Zp0 = jnp.real(jnp.array(Zp0)) Zp = transfer_mat[1:, idof, jdof] else: Zp0 = [0.0] Zp = transfer_mat[:, idof, jdof] - re = np.real(Zp) - im = np.imag(Zp) + re = jnp.real(jnp.array(Zp)) + im = jnp.imag(jnp.array(Zp)) blocks = [block(ire, iim) for (ire, iim) in zip(re[:-1], im[:-1])] blocks = [Zp0] + blocks + [re[-1]] - elem[idof][jdof] = block_diag(*blocks) - return np.block(elem) + elem[idof][jdof] = block_diag_jax(*blocks) + return jnp.block(elem) def vec_to_dofmat(vec: ArrayLike, ndof: int) -> ndarray: @@ -1585,7 +1593,7 @@ def vec_to_dofmat(vec: ArrayLike, ndof: int) -> ndarray: -------- dofmat_to_vec, """ - return np.reshape(vec, (-1, ndof), order='F') + return jnp.reshape(jnp.array(vec), (-1, ndof), order='F') def dofmat_to_vec(mat: ArrayLike) -> ndarray: @@ -1604,7 +1612,7 @@ def dofmat_to_vec(mat: ArrayLike) -> ndarray: -------- vec_to_dofmat, """ - return np.reshape(mat, -1, order='F') + return jnp.reshape(jnp.array(mat), -1, order='F') def real_to_complex( @@ -1649,10 +1657,10 @@ def real_to_complex( assert fd.shape[0]%2==0 mean = fd[0:1, :] fd = fd[1:, :] - fdc = np.append(fd[0:-1:2, :] + 1j*fd[1::2, :], - [fd[-1, :]], axis=0) + fdc = jnp.append(jnp.array(fd[0:-1:2, :] + 1j*fd[1::2, :]), + jnp.array([fd[-1, :]]), axis=0) if zero_freq: - fdc = np.concatenate((mean, fdc), axis=0) + fdc = jnp.concatenate((jnp.array(mean), jnp.array(fdc)), axis=0) return fdc @@ -1696,23 +1704,23 @@ def complex_to_real( nfreq = fd.shape[0] - 1 if zero_freq else fd.shape[0] ndof = fd.shape[1] if zero_freq: - assert np.all(np.isreal(fd[0, :])) - a = np.real(fd[0:1, :]) - b = np.real(fd[1:-1, :]) - c = np.imag(fd[1:-1, :]) - d = np.real(fd[-1:, :]) + #assert jnp.all(jnp.isreal(fd[0, :])) + a = jnp.real(fd[0:1, :]) + b = jnp.real(fd[1:-1, :]) + c = jnp.imag(fd[1:-1, :]) + d = jnp.real(fd[-1:, :]) else: - b = np.real(fd[:-1, :]) - c = np.imag(fd[:-1, :]) - d = np.real(fd[-1:, :]) - out = np.concatenate([np.transpose(b), np.transpose(c)]) - out = np.reshape(np.reshape(out, [-1], order='F'), [-1, ndof]) + b = jnp.real(fd[:-1, :]) + c = jnp.imag(fd[:-1, :]) + d = jnp.real(fd[-1:, :]) + out = jnp.concatenate([jnp.transpose(b), jnp.transpose(c)]) + out = jnp.reshape(jnp.reshape(out, 
[-1], order='F'), [-1, ndof]) if zero_freq: - out = np.concatenate([a, out, d]) - assert out.shape == (2*nfreq, ndof) + out = jnp.concatenate([a, out, d]) + #assert out.shape == (2*nfreq, ndof) else: - out = np.concatenate([out, d]) - assert out.shape == (2*nfreq-1, ndof) + out = jnp.concatenate([out, d]) + #assert out.shape == (2*nfreq-1, ndof) return out @@ -1817,13 +1825,13 @@ def td_to_fd( -------- fd_to_td """ - td= atleast_2d(td) + td = atleast_2d(td) n = td.shape[0] if fft: - fd = np.fft.rfft(td, n=n, axis=0, norm='forward') + fd = jnp.fft.rfft(td, n=n, axis=0, norm='forward') else: - fd = np.dot(dft(n, 'n')[:n//2+1, :], td) - fd = np.concatenate((fd[:1, :], fd[1:-1, :]*2, fd[-1:, :])) + fd = jnp.dot(dft(n, 'n')[:n//2+1, :], td) + fd = jnp.concatenate((fd[:1, :], fd[1:-1, :]*2, fd[-1:, :])) if not zero_freq: fd = fd[1:, :] return fd @@ -1904,8 +1912,8 @@ def check_radiation_damping( ndof = len(hydro_data_new.influenced_dof) assert ndof == len(hydro_data.radiating_dof) for idof in range(ndof): - iradiation = radiation.isel(radiating_dof=idof, influenced_dof=idof) - ifriction = friction.isel(radiating_dof=idof, influenced_dof=idof) + iradiation = radiation.isel(radiating_dof=idof, influenced_dof=idof).values + ifriction = friction.isel(radiating_dof=idof, influenced_dof=idof).values if uniform_shift: dmin = (iradiation+ifriction).min() if dmin < 0.0 + min_damping: @@ -1914,7 +1922,7 @@ def check_radiation_damping( _log.warning( f'Linear damping for DOF "{dof}" has negative or close ' + 'to zero terms. Shifting up radiation damping by ' + - f'{delta.values} N/(m/s).') + f'{delta} N/(m/s).') hydro_data_new['radiation_damping'][:, idof, idof] = (iradiation + delta) else: dof = hydro_data_new.influenced_dof.values[idof] @@ -1924,8 +1932,7 @@ def check_radiation_damping( 'zero terms. 
Shifting up damping terms ' + f'{np.where(iradiation < min_damping)[0]} to a minimum of ' + f'{min_damping} N/(m/s).') - new_damping = iradiation.where( - iradiation+ifriction>min_damping, other=min_damping) + new_damping = np.where(iradiation+ifriction>min_damping, iradiation, min_damping) hydro_data_new['radiation_damping'][:, idof, idof] = new_damping return hydro_data_new @@ -2000,8 +2007,8 @@ force_from_rao_transfer_function( """ def force(wec, x_wec, x_opt, wave): transfer_mat = mimo_transfer_mat(rao_transfer_mat, zero_freq) - force_fd = wec.vec_to_dofmat(np.dot(transfer_mat, x_wec)) - return np.dot(wec.time_mat, force_fd) + force_fd = wec.vec_to_dofmat(jnp.dot(transfer_mat, x_wec)) + return jnp.dot(wec.time_mat, force_fd) return force @@ -2037,7 +2044,7 @@ def force_from_wave(force_coeff: DataArray, """ def force(wec, x_wec, x_opt, wave): force_fd = complex_to_real(wave_excitation(force_coeff, wave), False) - return np.dot(wec.time_mat[:, 1:], force_fd) + return jnp.dot(wec.time_mat[:, 1:], force_fd) return force @@ -2118,7 +2125,7 @@ def standard_forces(hydro_data: Dataset) -> TForceDict: def run_bem( fb: cpy.FloatingBody, - freq: Iterable[float] = [np.infty], + freq: Iterable[float] = [np.inf], wave_dirs: Iterable[float] = [0], rho: float = _default_parameters['rho'], g: float = _default_parameters['g'], @@ -2268,7 +2275,7 @@ def add_linear_friction( f'Variable "{name}" is already in BEM data ' + 'with same value.') else: - friction_data = atleast_2d(friction) + friction_data = np.atleast_2d(friction) hydro_data['friction'] = (dims, friction_data) elif friction is None: ndof = len(hydro_data["influenced_dof"]) @@ -2325,7 +2332,7 @@ def wave_excitation(exc_coeff: DataArray, wave: DataArray) -> ndarray: f"\n Wave direction(s): {(np.rad2deg(dir_w))} (deg)" + f"\n BEM direction(s): {np.rad2deg(dir_e)} (deg).") - return np.sum(wave_elev_fd*exc_coeff[:, sub_ind, :], axis=1) + return jnp.sum(wave_elev_fd*exc_coeff[:, sub_ind, :], axis=1) def hydrodynamic_impedance(hydro_data: Dataset) -> Dataset: @@ -2360,8 +2367,8 @@ def atleast_2d(array: ArrayLike) -> ndarray: array Input array. """ - array = np.atleast_1d(array) - return np.expand_dims(array, -1) if len(array.shape)==1 else array + array = jnp.atleast_1d(array) + return jnp.expand_dims(array, -1) if len(array.shape)==1 else array def degrees_to_radians( @@ -2379,6 +2386,7 @@ Whether to sort the angles from smallest to largest in :math:`[-π, π)`. """ + radians = np.asarray(np.remainder(np.deg2rad(degrees), 2*np.pi)) radians[radians > np.pi] -= 2*np.pi if radians.size > 1 and sort: @@ -2544,6 +2552,7 @@ def frequency_parameters( If the zero-frequency was expected but not included or not expected but included. """ + freqs = np.asarray(freqs) if np.isclose(freqs[0], 0.0): if zero_freq: freqs0 = freqs[:] @@ -2554,7 +2563,7 @@ raise ValueError( 'Frequency array must start with the zero frequency.') else: - freqs0 = np.concatenate([[0.0,], freqs]) + freqs0 = np.concatenate([[0.0], freqs]) f1 = freqs0[1] nfreq = len(freqs0) - 1 @@ -2617,3 +2626,52 @@ def set_fb_centers( f'{property} already defined as {getattr(fb, property)}.') return fb + + +def block_diag_jax(*arrays: ArrayLike) -> ndarray: + """Creates a block diagonal matrix from provided arrays. + + Given the inputs A, B, and C, the output will have these + arrays arranged on the diagonal: + + [[A, 0, 0], + [0, B, 0], + [0, 0, C]] + + Parameters + ---------- + arrays + Input arrays. Each array can be up to 2-D. A 1-D array or + array-like sequence of length n is treated as a 2-D array + with shape (1, n).
+ + Returns + ------- + block_diag_mat + Array with A, B, C, ... on the diagonal. D has the same + dtype as the first input array. + + Notes + ----- + Empty sequences (i.e., array-likes of zero size) will not be ignored. + Noteworthy, both [] and [[]] are treated as matrices with shape (1, 0). + """ + # Convert inputs to JAX arrays and ensure they are at least 2D + blocks = [jnp.atleast_2d(jnp.array(a)) for a in arrays] + + # Compute the total shape of the resulting block diagonal matrix + total_rows = sum(block.shape[0] for block in blocks) + total_cols = sum(block.shape[1] for block in blocks) + + # Create an empty block diagonal matrix + block_diag_mat = jnp.zeros((total_rows, total_cols), dtype=blocks[0].dtype) + + # Fill the block diagonal matrix + r, c = 0, 0 + for block in blocks: + rr, cc = block.shape + block_diag_mat = block_diag_mat.at[r:r + rr, c:c + cc].set(block) + r += rr + c += cc + + return block_diag_mat \ No newline at end of file diff --git a/wecopttool/pto.py b/wecopttool/pto.py index 087a8e533..6b5b52b60 100644 --- a/wecopttool/pto.py +++ b/wecopttool/pto.py @@ -25,9 +25,9 @@ import logging from typing import Optional, TypeVar, Callable, Union -import autograd.numpy as np -from autograd.builtins import isinstance, tuple, list, dict -from autograd.numpy import ndarray +import numpy as np +import jax.numpy as jnp +from numpy import ndarray from scipy.linalg import block_diag from scipy.optimize import OptimizeResult from xarray import DataArray, Dataset, concat @@ -112,12 +112,12 @@ def __init__(self, def kinematics_fun(wec, x_wec, x_opt, wave, nsubsteps=1): pos_wec = wec.vec_to_dofmat(x_wec) tmat = self._tmat(wec, nsubsteps) - pos_wec_td = np.dot(tmat, pos_wec) + pos_wec_td = jnp.dot(tmat, pos_wec) return kinematics(pos_wec_td) else: def kinematics_fun(wec, x_wec, x_opt, wave, nsubsteps=1): n = wec.nt*nsubsteps - return np.repeat(kinematics[:, :, np.newaxis], n, axis=-1) + return jnp.repeat(kinematics[:, :, jnp.newaxis], n, axis=-1) self._kinematics = kinematics_fun # controller @@ -231,11 +231,11 @@ def _fkinematics(self, length. """ time_mat = self._tmat(wec, nsubsteps) - f_wec_td = np.dot(time_mat, f_wec) + f_wec_td = jnp.dot(time_mat, f_wec) assert f_wec_td.shape == (wec.nt*nsubsteps, wec.ndof) - f_wec_td = np.expand_dims(np.transpose(f_wec_td), axis=0) + f_wec_td = jnp.expand_dims(jnp.transpose(f_wec_td), axis=0) kinematics_mat = self.kinematics(wec, x_wec, x_opt, wave, nsubsteps) - return np.transpose(np.sum(kinematics_mat*f_wec_td, axis=1)) + return jnp.transpose(jnp.sum(kinematics_mat*f_wec_td, axis=1)) def position(self, wec: TWEC, @@ -295,7 +295,7 @@ def velocity(self, length. """ pos_wec = wec.vec_to_dofmat(x_wec) - vel_wec = np.dot(wec.derivative_mat, pos_wec) + vel_wec = jnp.dot(wec.derivative_mat, pos_wec) return self._fkinematics(vel_wec, wec, x_wec, x_opt, wave, nsubsteps) def acceleration(self, @@ -304,7 +304,7 @@ def acceleration(self, x_opt: ndarray, wave: Optional[DataArray] = None, nsubsteps: Optional[int] = 1, - ) -> np.ndarray: + ) -> jnp.ndarray: """Calculate the PTO acceleration time-series. Parameters @@ -326,7 +326,7 @@ def acceleration(self, length. 
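For reference, the new block_diag_jax helper defined above mirrors scipy.linalg.block_diag while staying traceable and differentiable by JAX. A small usage sketch (illustrative only; the example arrays are made up, and the import path follows this diff):

    import jax
    import jax.numpy as jnp
    import numpy as np
    from scipy.linalg import block_diag
    from wecopttool.core import block_diag_jax

    A = jnp.array([[0.0, -1.0], [1.0, 0.0]])
    B = jnp.array([[2.0]])

    # same layout as the SciPy call it replaces in derivative_mat and
    # mimo_transfer_mat
    assert np.allclose(block_diag_jax(A, B),
                       block_diag(np.asarray(A), np.asarray(B)))

    # unlike the SciPy version, the assembly stays inside JAX tracing,
    # so gradients flow through it
    grad_A = jax.grad(lambda a: jnp.sum(block_diag_jax(a, B)))(A)
    print(grad_A)  # ones, shape (2, 2)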
""" pos_wec = wec.vec_to_dofmat(x_wec) - acc_wec = np.dot(wec.derivative2_mat, pos_wec) + acc_wec = jnp.dot(wec.derivative2_mat, pos_wec) return self._fkinematics(acc_wec, wec, x_wec, x_opt, wave, nsubsteps) def force_on_wec(self, @@ -358,11 +358,11 @@ def force_on_wec(self, """ force_td = self.force(wec, x_wec, x_opt, wave, nsubsteps) assert force_td.shape == (wec.nt*nsubsteps, self.ndof) - force_td = np.expand_dims(np.transpose(force_td), axis=0) + force_td = jnp.expand_dims(jnp.transpose(force_td), axis=0) assert force_td.shape == (1, self.ndof, wec.nt*nsubsteps) kinematics_mat = self.kinematics(wec, x_wec, x_opt, wave, nsubsteps) - kinematics_mat = np.transpose(kinematics_mat, (1,0,2)) - return np.transpose(np.sum(kinematics_mat*force_td, axis=1)) + kinematics_mat = jnp.transpose(kinematics_mat, (1,0,2)) + return jnp.transpose(jnp.sum(kinematics_mat*force_td, axis=1)) def mechanical_power(self, wec: TWEC, @@ -370,7 +370,7 @@ def mechanical_power(self, x_opt: ndarray, wave: Optional[DataArray] = None, nsubsteps: Optional[int] = 1, - ) -> np.ndarray: + ) -> jnp.ndarray: """Calculate the mechanical power time-series in each PTO DOF for a given system state. @@ -425,7 +425,7 @@ def mechanical_energy(self, length. """ power_td = self.mechanical_power(wec, x_wec, x_opt, wave, nsubsteps) - return np.sum(power_td) * wec.dt/nsubsteps + return jnp.sum(power_td) * wec.dt/nsubsteps def mechanical_average_power(self, wec: TWEC, @@ -493,15 +493,15 @@ def power_variables(self, e1_td = self.force(wec, x_wec, x_opt, wave) q1 = complex_to_real(td_to_fd(q1_td, False)) e1 = complex_to_real(td_to_fd(e1_td, False)) - vars_1 = np.hstack([q1, e1]) + vars_1 = jnp.hstack([q1, e1]) vars_1_flat = dofmat_to_vec(vars_1) - vars_2_flat = np.dot(self.transfer_mat, vars_1_flat) + vars_2_flat = jnp.dot(self.transfer_mat, vars_1_flat) vars_2 = vec_to_dofmat(vars_2_flat, 2*self.ndof) q2 = vars_2[:, :self.ndof] e2 = vars_2[:, self.ndof:] time_mat = self._tmat(wec, nsubsteps) - q2_td = np.dot(time_mat, q2) - e2_td = np.dot(time_mat, e2) + q2_td = jnp.dot(time_mat, q2) + e2_td = jnp.dot(time_mat, e2) else: q2_td = self.velocity(wec, x_wec, x_opt, wave, nsubsteps) e2_td = self.force(wec, x_wec, x_opt, wave, nsubsteps) @@ -572,7 +572,7 @@ def energy(self, length. """ power_td = self.power(wec, x_wec, x_opt, wave, nsubsteps) - return np.sum(power_td) * wec.dt/nsubsteps + return jnp.sum(power_td) * wec.dt/nsubsteps def average_power(self, wec: TWEC, @@ -880,23 +880,23 @@ def _make_abcd(impedance: ndarray, ndof: int) -> ndarray: z_12 = impedance[:ndof, ndof:, :] # Fi z_21 = impedance[ndof:, :ndof, :] # Vu z_22 = impedance[ndof:, ndof:, :] # Vi - z_12_inv = np.linalg.inv(z_12.T).T + z_12_inv = jnp.linalg.inv(z_12.T).T - mmult = lambda a,b: np.einsum('mnr,mnr->mnr', a, b) + mmult = lambda a,b: jnp.einsum('mnr,mnr->mnr', a, b) abcd_11 = -1 * mmult(z_12_inv, z_11) abcd_12 = z_12_inv abcd_21 = z_21 - mmult(z_22, mmult(z_12_inv, z_11)) abcd_22 = mmult(z_22, z_12_inv) - row_1 = np.hstack([abcd_11, abcd_12]) - row_2 = np.hstack([abcd_21, abcd_22]) - return np.vstack([row_1, row_2]) + row_1 = jnp.hstack([abcd_11, abcd_12]) + row_2 = jnp.hstack([abcd_21, abcd_22]) + return jnp.vstack([row_1, row_2]) def _make_mimo_transfer_mat( impedance_abcd: ndarray, ndof: int, -) -> np.ndarray: +) -> jnp.ndarray: """Create a block matrix of a MIMO transfer function. Parameters @@ -906,12 +906,12 @@ def _make_mimo_transfer_mat( ndof Number of degrees of freedom. 
""" - def block(re, im): return np.array([[re, -im], [im, re]]) + def block(re, im): return jnp.array([[re, -im], [im, re]]) for idof in range(2*ndof): for jdof in range(2*ndof): Zp = impedance_abcd[idof, jdof, :] - re = np.real(Zp) - im = np.imag(Zp) + re = jnp.real(Zp) + im = jnp.imag(Zp) # Exclude the sine component of the 2-point wave blocks = [block(ire, iim) for (ire, iim) in zip(re[:-1], im[:-1])] # re[0] added for the zero frequency power loss (DC), could be re[n] @@ -919,10 +919,10 @@ def block(re, im): return np.array([[re, -im], [im, re]]) if jdof==0: row = block_diag(*blocks) else: - row = np.hstack([row, block_diag(*blocks)]) + row = jnp.hstack([row, block_diag(*blocks)]) if idof==0: mat = row else: - mat = np.vstack([mat, row]) + mat = jnp.vstack([mat, row]) return mat diff --git a/wecopttool/utilities.py b/wecopttool/utilities.py index 93726cc6b..5e4896cc8 100644 --- a/wecopttool/utilities.py +++ b/wecopttool/utilities.py @@ -19,10 +19,8 @@ import logging from pathlib import Path -import autograd.numpy as np -from autograd.numpy import ndarray -from autograd.numpy.linalg import inv -from xarray import DataArray +import numpy as np +from numpy.linalg import inv from numpy.typing import ArrayLike from xarray import DataArray, concat import matplotlib.pyplot as plt @@ -151,8 +149,8 @@ def plot_bode_impedance(impedance: DataArray, """ radiating_dofs = impedance.radiating_dof.values influenced_dofs = impedance.influenced_dof.values - mag = 20.0 * np.log10(np.abs(impedance)) - phase = np.rad2deg(np.unwrap(np.angle(impedance))) + mag = 20.0 * np.log10(np.abs(impedance.values)) + phase = np.rad2deg(np.unwrap(np.angle(impedance.values))) freq = impedance.omega.values/2/np.pi if fig_axes is None: fig, axes = plt.subplots(