From 3af2e2042dd0acae1808544e582b12edc6c1275e Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Tue, 12 Nov 2024 11:18:28 +1300
Subject: [PATCH] chore: reformat codebase with longer line length

---
 .docs/Notebooks/dis_triangle_example.py | 8 +-
 .docs/Notebooks/dis_voronoi_example.py | 12 +-
 .docs/Notebooks/export_tutorial.py | 12 +-
 .docs/Notebooks/export_vtk_tutorial.py | 16 +-
 .../external_file_handling_tutorial.py | 6 +-
 .../Notebooks/get_transmissivities_example.py | 8 +-
 .docs/Notebooks/gridgen_example.py | 12 +-
 .../groundwater2023_watershed_example.py | 36 +-
 .../Notebooks/groundwater_paper_example_1.py | 4 +-
 .../groundwater_paper_uspb_example.py | 24 +-
 .docs/Notebooks/mf6_complex_model_example.py | 12 +-
 .docs/Notebooks/mf6_data_tutorial01.py | 8 +-
 .docs/Notebooks/mf6_data_tutorial09.py | 8 +-
 .docs/Notebooks/mf6_lgr_tutorial01.py | 8 +-
 .docs/Notebooks/mf6_mnw2_tutorial01.py | 11 +-
 .docs/Notebooks/mf6_output_tutorial01.py | 4 +-
 .../mf6_parallel_model_splitting_example.py | 8 +-
 .docs/Notebooks/mf6_support_example.py | 12 +-
 .docs/Notebooks/mf6_tutorial01.py | 4 +-
 .docs/Notebooks/mf_tutorial02.py | 4 +-
 .docs/Notebooks/mfusg_conduit_examples.py | 28 +-
 .docs/Notebooks/mfusg_freyberg_example.py | 4 +-
 .docs/Notebooks/mfusg_zaidel_example.py | 4 +-
 .docs/Notebooks/modelgrid_examples.py | 24 +-
 .docs/Notebooks/modpath6_example.py | 12 +-
 .../modpath7_create_simulation_example.py | 12 +-
 .docs/Notebooks/mt3d-usgs_example.py | 8 +-
 .docs/Notebooks/mt3dms_examples.py | 20 +-
 .../Notebooks/mt3dms_sft_lkt_uzt_tutorial.py | 20 +-
 .docs/Notebooks/nwt_option_blocks_tutorial.py | 8 +-
 .docs/Notebooks/pest_tutorial01.py | 20 +-
 .docs/Notebooks/plot_cross_section_example.py | 28 +-
 .docs/Notebooks/plot_map_view_example.py | 28 +-
 .../Notebooks/raster_intersection_example.py | 44 +--
 .../save_binary_data_file_example.py | 4 +-
 .docs/Notebooks/seawat_henry_example.py | 4 +-
 .docs/Notebooks/sfrpackage_example.py | 8 +-
 .docs/Notebooks/shapefile_feature_examples.py | 4 +-
 .docs/Notebooks/swi2package_example1.py | 4 +-
 .docs/Notebooks/swi2package_example2.py | 8 +-
 .docs/Notebooks/swi2package_example3.py | 18 +-
 .docs/Notebooks/swi2package_example4.py | 28 +-
 .docs/Notebooks/swi2package_example5.py | 8 +-
 .docs/Notebooks/uzf_example.py | 8 +-
 .docs/Notebooks/vtk_pathlines_example.py | 4 +-
 .docs/Notebooks/zonebudget_example.py | 12 +-
 .docs/create_rstfiles.py | 12 +-
 .../groundwater_paper/scripts/uspb_capture.py | 16 +-
 .../scripts/uspb_capture_par.py | 40 +-
 .docs/pysrc/tutorial2.py | 4 +-
 autotest/conftest.py | 4 +-
 autotest/regression/test_mf6.py | 372 +++++-------------
 autotest/regression/test_mf6_pandas.py | 8 +-
 autotest/regression/test_modflow.py | 51 +--
 autotest/regression/test_str.py | 8 +-
 autotest/regression/test_swi2.py | 8 +-
 autotest/regression/test_wel.py | 12 +-
 autotest/test_binaryfile.py | 60 +--
 autotest/test_binarygrid_util.py | 45 +--
 autotest/test_cbc_full3D.py | 16 +-
 autotest/test_cellbudgetfile.py | 14 +-
 autotest/test_compare.py | 12 +-
 autotest/test_dis_cases.py | 4 +-
 autotest/test_export.py | 140 ++-----
 autotest/test_flopy_io.py | 28 +-
 autotest/test_flopy_module.py | 8 +-
 autotest/test_gage.py | 8 +-
 autotest/test_geospatial_util.py | 8 +-
 autotest/test_get_modflow.py | 11 +-
 autotest/test_grid.py | 124 ++----
 autotest/test_grid_cases.py | 12 +-
 autotest/test_gridgen.py | 65 +--
 autotest/test_gridintersect.py | 32 +-
 autotest/test_headufile.py | 4 +-
 autotest/test_hydmodfile.py | 24 +-
 autotest/test_lake_connections.py | 40 +-
 autotest/test_listbudget.py | 8 +-
 autotest/test_mbase.py | 14 +-
 autotest/test_mf6.py | 129 ++----
 autotest/test_mfnwt.py | 16 +-
 autotest/test_mfreadnam.py | 4 +-
 autotest/test_mfsimlist.py | 9 +-
 autotest/test_mnw.py | 27 +-
 autotest/test_model_dot_plot.py | 12 +-
 autotest/test_model_splitter.py | 53 +--
 autotest/test_modflow.py | 87 +---
 autotest/test_modflowoc.py | 4 +-
 autotest/test_modpathfile.py | 15 +-
 autotest/test_mp6.py | 84 +---
 autotest/test_mp7.py | 32 +-
 autotest/test_mp7_cases.py | 8 +-
 autotest/test_mt3d.py | 56 +--
 autotest/test_obs.py | 18 +-
 autotest/test_particledata.py | 49 +--
 autotest/test_plot_cross_section.py | 40 +-
 autotest/test_plot_map_view.py | 12 +-
 autotest/test_plot_particle_tracks.py | 16 +-
 autotest/test_plot_quasi3d.py | 8 +-
 autotest/test_plotutil.py | 28 +-
 autotest/test_postprocessing.py | 32 +-
 autotest/test_rasters.py | 20 +-
 autotest/test_seawat.py | 4 +-
 autotest/test_sfr.py | 42 +-
 autotest/test_shapefile_utils.py | 8 +-
 autotest/test_specific_discharge.py | 26 +-
 autotest/test_subwt.py | 4 +-
 autotest/test_swr_binaryread.py | 160 ++------
 autotest/test_usg.py | 9 +-
 autotest/test_util_2d_and_3d.py | 28 +-
 autotest/test_util_geometry.py | 8 +-
 autotest/test_uzf.py | 62 +--
 autotest/test_zonbud_utility.py | 14 +-
 flopy/discretization/grid.py | 31 +-
 flopy/discretization/structuredgrid.py | 89 ++---
 flopy/discretization/unstructuredgrid.py | 37 +-
 flopy/discretization/vertexgrid.py | 24 +-
 flopy/export/metadata.py | 28 +-
 flopy/export/netcdf.py | 116 ++----
 flopy/export/shapefile_utils.py | 34 +-
 flopy/export/utils.py | 140 ++-----
 flopy/export/vtk.py | 71 +---
 flopy/mbase.py | 82 ++--
 flopy/mfusg/mfusg.py | 28 +-
 flopy/mfusg/mfusgbcf.py | 20 +-
 flopy/mfusg/mfusgcln.py | 39 +-
 flopy/mfusg/mfusgdisu.py | 48 +--
 flopy/mfusg/mfusggnc.py | 8 +-
 flopy/mfusg/mfusglpf.py | 16 +-
 flopy/mfusg/mfusgsms.py | 5 +-
 flopy/mfusg/mfusgwel.py | 12 +-
 flopy/modflow/mf.py | 36 +-
 flopy/modflow/mfaddoutsidefile.py | 4 +-
 flopy/modflow/mfag.py | 32 +-
 flopy/modflow/mfbas.py | 4 +-
 flopy/modflow/mfbcf.py | 16 +-
 flopy/modflow/mfchd.py | 4 +-
 flopy/modflow/mfdis.py | 40 +-
 flopy/modflow/mfdrn.py | 4 +-
 flopy/modflow/mfdrt.py | 4 +-
 flopy/modflow/mfevt.py | 49 +--
 flopy/modflow/mffhb.py | 40 +-
 flopy/modflow/mfflwob.py | 20 +-
 flopy/modflow/mfgage.py | 11 +-
 flopy/modflow/mfghb.py | 4 +-
 flopy/modflow/mfgmg.py | 12 +-
 flopy/modflow/mfhfb.py | 8 +-
 flopy/modflow/mfhob.py | 7 +-
 flopy/modflow/mfhyd.py | 4 +-
 flopy/modflow/mflak.py | 40 +-
 flopy/modflow/mflpf.py | 16 +-
 flopy/modflow/mfmnw1.py | 27 +-
 flopy/modflow/mfmnw2.py | 117 ++----
 flopy/modflow/mfmnwi.py | 40 +-
 flopy/modflow/mfnwt.py | 4 +-
 flopy/modflow/mfoc.py | 12 +-
 flopy/modflow/mfpar.py | 8 +-
 flopy/modflow/mfpbc.py | 4 +-
 flopy/modflow/mfpcgn.py | 20 +-
 flopy/modflow/mfpks.py | 5 +-
 flopy/modflow/mfrch.py | 65 +--
 flopy/modflow/mfriv.py | 10 +-
 flopy/modflow/mfsfr2.py | 284 ++++---------
 flopy/modflow/mfsor.py | 5 +-
 flopy/modflow/mfstr.py | 37 +-
 flopy/modflow/mfsub.py | 12 +-
 flopy/modflow/mfswi2.py | 24 +-
 flopy/modflow/mfswr1.py | 8 +-
 flopy/modflow/mfswt.py | 16 +-
 flopy/modflow/mfupw.py | 15 +-
 flopy/modflow/mfuzf1.py | 48 +--
 flopy/modflow/mfwel.py | 26 +-
 flopy/modflow/mfzon.py | 4 +-
 flopy/modflowlgr/mflgr.py | 22 +-
 flopy/modpath/mp6.py | 36 +-
 flopy/modpath/mp6bas.py | 4 +-
 flopy/modpath/mp6sim.py | 28 +-
 flopy/modpath/mp7.py | 25 +-
 flopy/modpath/mp7bas.py | 4 +-
 flopy/modpath/mp7particledata.py | 72 +---
 flopy/modpath/mp7particlegroup.py | 24 +-
 flopy/modpath/mp7sim.py | 23 +-
 flopy/mt3d/mt.py | 46 +--
 flopy/mt3d/mtadv.py | 7 +-
 flopy/mt3d/mtbtn.py | 20 +-
 flopy/mt3d/mtdsp.py | 7 +-
 flopy/mt3d/mtlkt.py | 37 +-
 flopy/mt3d/mtrct.py | 6 +-
 flopy/mt3d/mtsft.py | 24 +-
 flopy/mt3d/mtssm.py | 24 +-
 flopy/mt3d/mttob.py | 12 +-
 flopy/mt3d/mtuzt.py | 12 +-
 flopy/pakbase.py | 66 +---
 flopy/plot/crosssection.py | 86 ++--
 flopy/plot/map.py | 24 +-
 flopy/plot/plotutil.py | 47 +--
 flopy/plot/styles.py | 20 +-
 flopy/seawat/swt.py | 16 +-
 flopy/seawat/swtvdf.py | 11 +-
 flopy/seawat/swtvsc.py | 3 +-
 flopy/utils/binaryfile.py | 119 ++----
 flopy/utils/check.py | 65 +--
 flopy/utils/compare.py | 71 +---
 flopy/utils/cvfdutil.py | 7 +-
 flopy/utils/datafile.py | 34 +-
 flopy/utils/datautil.py | 56 +--
 flopy/utils/flopy_io.py | 8 +-
 flopy/utils/formattedfile.py | 13 +-
 flopy/utils/geometry.py | 22 +-
 flopy/utils/geospatial_utils.py | 20 +-
 flopy/utils/get_modflow.py | 63 +--
 flopy/utils/gridgen.py | 60 +--
 flopy/utils/gridintersect.py | 83 +---
 flopy/utils/gridutil.py | 4 +-
 flopy/utils/lgrutil.py | 20 +-
 flopy/utils/mflistfile.py | 33 +-
 flopy/utils/modpathfile.py | 41 +-
 flopy/utils/mtlistfile.py | 50 +--
 flopy/utils/observationfile.py | 4 +-
 flopy/utils/optionblock.py | 8 +-
 flopy/utils/parse_version.py | 23 +-
 flopy/utils/particletrackfile.py | 12 +-
 flopy/utils/postprocessing.py | 50 +--
 flopy/utils/rasters.py | 12 +-
 flopy/utils/sfroutputfile.py | 8 +-
 flopy/utils/swroutputfile.py | 20 +-
 flopy/utils/triangle.py | 8 +-
 flopy/utils/util_array.py | 102 ++---
 flopy/utils/util_list.py | 79 +---
 flopy/utils/utils_def.py | 12 +-
 flopy/utils/utl_import.py | 4 +-
 flopy/utils/voronoi.py | 8 +-
 flopy/utils/zonbud.py | 284 ++++---------
 pyproject.toml | 11 +-
 scripts/process_benchmarks.py | 8 +-
 scripts/update_version.py | 4 +-
 235 files changed, 1828 insertions(+), 5201 deletions(-)

diff --git a/.docs/Notebooks/dis_triangle_example.py b/.docs/Notebooks/dis_triangle_example.py
index 3d0699e694..8bce5df812 100644
--- a/.docs/Notebooks/dis_triangle_example.py
+++ b/.docs/Notebooks/dis_triangle_example.py
@@ -198,9 +198,7 @@
 sim = flopy.mf6.MFSimulation(
     sim_name=name, version="mf6", exe_name="mf6", sim_ws=workspace
 )
-tdis = flopy.mf6.ModflowTdis(
-    sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]]
-)
+tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]])
 gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
 ims = flopy.mf6.ModflowIms(
     sim,
@@ -227,9 +225,7 @@
     vertices=vertices,
     cell2d=cell2d,
 )
-npf = flopy.mf6.ModflowGwfnpf(
-    gwf, xt3doptions=[(True)], save_specific_discharge=None
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=[(True)], save_specific_discharge=None)
 ic = flopy.mf6.ModflowGwfic(gwf)
diff --git a/.docs/Notebooks/dis_voronoi_example.py b/.docs/Notebooks/dis_voronoi_example.py
index 73d81fef33..126603cde1 100644
--- a/.docs/Notebooks/dis_voronoi_example.py
+++ b/.docs/Notebooks/dis_voronoi_example.py
@@ -130,9 +130,7 @@
 sim = flopy.mf6.MFSimulation(
     sim_name=name, version="mf6", exe_name="mf6", sim_ws=sim_ws
 )
-tdis = flopy.mf6.ModflowTdis(
-    sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]]
-)
+tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", perioddata=[[1.0, 1, 1.0]])
 gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
 ims = flopy.mf6.ModflowIms(
     sim,
@@ -145,9 +143,7 @@
 nlay = 1
 top = 1.0
 botm = [0.0]
-disv = flopy.mf6.ModflowGwfdisv(
-    gwf, nlay=nlay, **disv_gridprops, top=top, botm=botm
-)
+disv = flopy.mf6.ModflowGwfdisv(gwf, nlay=nlay, **disv_gridprops, top=top, botm=botm)
 npf = flopy.mf6.ModflowGwfnpf(
     gwf,
     xt3doptions=[(True)],
@@ -209,9 +205,7 @@
 nlay = 1
 top = 1.0
 botm = [0.0]
-disv = flopy.mf6.ModflowGwtdisv(
-    gwt, nlay=nlay, **disv_gridprops, top=top, botm=botm
-)
+disv = flopy.mf6.ModflowGwtdisv(gwt, nlay=nlay, **disv_gridprops, top=top, botm=botm)
 ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0)
 sto = flopy.mf6.ModflowGwtmst(gwt, porosity=0.2)
 adv = flopy.mf6.ModflowGwtadv(gwt, scheme="TVD")
diff --git a/.docs/Notebooks/export_tutorial.py b/.docs/Notebooks/export_tutorial.py
index 9751b3f1a2..cf830e04d9 100644
--- a/.docs/Notebooks/export_tutorial.py
+++ b/.docs/Notebooks/export_tutorial.py
@@ -31,9 +31,7 @@
 # Load our old friend...the Freyberg model
 nam_file = "freyberg.nam"
-model_ws = os.path.join(
-    "..", "..", "examples", "data", "freyberg_multilayer_transient"
-)
+model_ws = os.path.join("..", "..", "examples", "data", "freyberg_multilayer_transient")
 ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False)
 # We can see the ``Modelgrid`` instance has generic entries, as does ``start_datetime``
@@ -44,9 +42,7 @@
 # Setting the attributes of the ``ml.modelgrid`` is easy:
-ml.modelgrid.set_coord_info(
-    xoff=123456.7, yoff=765432.1, angrot=15.0, crs=3070
-)
+ml.modelgrid.set_coord_info(xoff=123456.7, yoff=765432.1, angrot=15.0, crs=3070)
 ml.dis.start_datetime = "7/4/1776"
 ml.modeltime.start_datetime
@@ -125,9 +121,7 @@
 export_dict = {"hds": hds, "cbc": cbc}
 # export head and cell budget outputs to netcdf
-fnc = flopy.export.utils.output_helper(
-    os.path.join(pth, "output.nc"), ml, export_dict
-)
+fnc = flopy.export.utils.output_helper(os.path.join(pth, "output.nc"), ml, export_dict)
 # -
 try:
diff --git a/.docs/Notebooks/export_vtk_tutorial.py b/.docs/Notebooks/export_vtk_tutorial.py
index c21d40919a..9ccc2c0835 100644
--- a/.docs/Notebooks/export_vtk_tutorial.py
+++ b/.docs/Notebooks/export_vtk_tutorial.py
@@ -45,9 +45,7 @@
 # load model for examples
 nam_file = "freyberg.nam"
 model_ws = Path(
-    os.path.join(
-        "..", "..", "examples", "data", "freyberg_multilayer_transient"
-    )
+    os.path.join("..", "..", "examples", "data", "freyberg_multilayer_transient")
 )
 ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False)
@@ -107,9 +105,7 @@
 # 3D Array export
 # hk export, with points
 model_hk_dir = output_dir / "HK"
-ml.upw.hk.export(
-    model_hk_dir, smooth=True, fmt="vtk", name="HK", point_scalars=True
-)
+ml.upw.hk.export(model_hk_dir, smooth=True, fmt="vtk", name="HK", point_scalars=True)
 # ### Package export to .vtu files
 #
@@ -312,9 +308,7 @@
 # +
 # export heads as point scalars
-vtkobj = vtk.Vtk(
-    ml, xml=True, pvd=True, point_scalars=True, vertical_exageration=10
-)
+vtkobj = vtk.Vtk(ml, xml=True, pvd=True, point_scalars=True, vertical_exageration=10)
 # export heads for time step 1, stress periods 1, 50, 100, 1000
 vtkobj.add_heads(hds, kstpkper=[(0, 0), (0, 49), (0, 99), (0, 999)])
@@ -559,9 +553,7 @@ def run_vertex_grid_example(ws):
     # riv
     riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
     rivcells = g.intersect(riverline, "line", 0)
-    rivspd = [
-        [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
-    ]
+    rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
     riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
     # output control
diff --git a/.docs/Notebooks/external_file_handling_tutorial.py b/.docs/Notebooks/external_file_handling_tutorial.py
index f8ee402be5..fecb1073d8 100644
--- a/.docs/Notebooks/external_file_handling_tutorial.py
+++ b/.docs/Notebooks/external_file_handling_tutorial.py
@@ -100,11 +100,7 @@
 # list the files in model_ws that have 'hk' in the name
 print(
     "\n".join(
-        [
-            name
-            for name in os.listdir(ml.model_ws)
-            if "hk" in name or "impor" in name
-        ]
+        [name for name in os.listdir(ml.model_ws) if "hk" in name or "impor" in name]
     )
 )
diff --git a/.docs/Notebooks/get_transmissivities_example.py b/.docs/Notebooks/get_transmissivities_example.py
index dbb7307935..85519393a7 100644
--- a/.docs/Notebooks/get_transmissivities_example.py
+++ b/.docs/Notebooks/get_transmissivities_example.py
@@ -76,9 +76,7 @@
 model_ws = temp_dir.name
 m = flopy.modflow.Modflow("junk", version="mfnwt", model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
-    m, nlay=nl, nrow=nr, ncol=nc, botm=botm, top=top
-)
+dis = flopy.modflow.ModflowDis(m, nlay=nl, nrow=nr, ncol=nc, botm=botm, top=top)
 upw = flopy.modflow.ModflowUpw(m, hk=hk)
 # -
@@ -88,9 +86,7 @@
 # (cells that are partially within the open interval have reduced thickness, cells outside of the open interval have transmissivities of 0). If no `sctop` or `scbot` arguments are supplied, trasmissivites reflect the full saturated thickness in each column of cells (see plot below, which shows different open intervals relative to the model layering)
 r, c = np.arange(nr), np.arange(nc)
-T = flopy.utils.get_transmissivities(
-    heads, m, r=r, c=c, sctop=sctop, scbot=scbot
-)
+T = flopy.utils.get_transmissivities(heads, m, r=r, c=c, sctop=sctop, scbot=scbot)
 np.round(T, 2)
 m.dis.botm.array[:, r, c]
diff --git a/.docs/Notebooks/gridgen_example.py b/.docs/Notebooks/gridgen_example.py
index a9d944bfa6..59af49f20f 100644
--- a/.docs/Notebooks/gridgen_example.py
+++ b/.docs/Notebooks/gridgen_example.py
@@ -55,9 +55,7 @@
     )
     print(msg)
 else:
-    print(
-        f"gridgen executable was found at: {flopy_io.relpath_safe(gridgen_exe)}"
-    )
+    print(f"gridgen executable was found at: {flopy_io.relpath_safe(gridgen_exe)}")
 # +
 # temporary directory
@@ -264,9 +262,7 @@
 gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
 disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops)
 ic = flopy.mf6.ModflowGwfic(gwf)
-npf = flopy.mf6.ModflowGwfnpf(
-    gwf, xt3doptions=True, save_specific_discharge=True
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
 chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
 budget_file = f"{name}.bud"
 head_file = f"{name}.hds"
@@ -367,9 +363,7 @@
 gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
 disu = flopy.mf6.ModflowGwfdisu(gwf, **disu_gridprops)
 ic = flopy.mf6.ModflowGwfic(gwf)
-npf = flopy.mf6.ModflowGwfnpf(
-    gwf, xt3doptions=True, save_specific_discharge=True
-)
+npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True)
 chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
 budget_file = f"{name}.bud"
 head_file = f"{name}.hds"
diff --git a/.docs/Notebooks/groundwater2023_watershed_example.py b/.docs/Notebooks/groundwater2023_watershed_example.py
index 8621505dd4..3c14c6a30c 100644
--- a/.docs/Notebooks/groundwater2023_watershed_example.py
+++ b/.docs/Notebooks/groundwater2023_watershed_example.py
@@ -70,9 +70,7 @@ def densify_geometry(line, step, keep_internal_nodes=True):
     lines_strings = []
     if keep_internal_nodes:
         for idx in range(1, len(line)):
-            lines_strings.append(
-                shapely.geometry.LineString(line[idx - 1 : idx + 1])
-            )
+            lines_strings.append(shapely.geometry.LineString(line[idx - 1 : idx + 1]))
     else:
         lines_strings = [shapely.geometry.LineString(line)]
@@ -262,9 +260,7 @@ def set_idomain(grid, boundary):
 multiplier = 1.175
 transition = 20000.0
 ncells = 7
-smoothr = [
-    transition * (multiplier - 1.0) / (multiplier ** float(ncells) - 1.0)
-]
+smoothr = [transition * (multiplier - 1.0) / (multiplier ** float(ncells) - 1.0)]
 for i in range(ncells - 1):
     smoothr.append(smoothr[i] * multiplier)
 smooth = smoothr.copy()
@@ -326,9 +322,7 @@ def set_idomain(grid, boundary):
     alpha=0.2,
     cmap="Reds_r",
 )
-cg = pmv.contour_array(
-    top_sg_vrc, levels=levels, linewidths=0.3, colors="0.75"
-)
+cg = pmv.contour_array(top_sg_vrc, levels=levels, linewidths=0.3, colors="0.75")
 pmv.plot_inactive()
 ax.plot(bp[:, 0], bp[:, 1], "k-")
@@ -503,9 +497,7 @@ def set_idomain(grid, boundary):
     delc=dx,
 )
 g = Gridgen(gwf.modelgrid, model_ws=temp_path)
-adpoly = [
-    [[(1000, 1000), (3000, 1000), (3000, 2000), (1000, 2000), (1000, 1000)]]
-]
+adpoly = [[[(1000, 1000), (3000, 1000), (3000, 2000), (1000, 2000), (1000, 1000)]]]
 adpoly = boundary_polygon + [boundary_polygon[0]]
 adpoly = [[adpoly]]
 g.add_refinement_features([lgr_poly], "polygon", 2, range(1))
@@ -570,9 +562,7 @@ def set_idomain(grid, boundary):
 nodes = np.array(nodes)
 # +
-tri = Triangle(
-    maximum_area=5000 * 5000, angle=30, nodes=nodes, model_ws=temp_path
-)
+tri = Triangle(maximum_area=5000 * 5000, angle=30, nodes=nodes, model_ws=temp_path)
 poly = bp
 tri.add_polygon(poly)
 tri.build(verbose=False)
@@ -753,9 +743,7 @@ def set_idomain(grid, boundary):
         gg = grids[idx]
         tt = topo_grids[idx]
         for g, t in zip(gg[1:], tt[1:]):
-            pmvc = flopy.plot.PlotMapView(
-                modelgrid=g, ax=ax, extent=extent
-            )
+            pmvc = flopy.plot.PlotMapView(modelgrid=g, ax=ax, extent=extent)
             pmvc.plot_array(top_ngc, vmin=vmin, vmax=vmax)
             pmvc.plot_grid(**grid_dict)
             cgc = pmvc.contour_array(top_ngc, **contour_dict)
@@ -873,9 +861,7 @@ def set_idomain(grid, boundary):
         length=9,
         pad=2,
     )
-    cbar.ax.set_title(
-        "Elevation (m)", pad=2.5, loc="center", fontdict=font_dict
-    )
+    cbar.ax.set_title("Elevation (m)", pad=2.5, loc="center", fontdict=font_dict)
 # -
 # ### Plot the river intersection for the six grids
@@ -939,12 +925,8 @@ def set_idomain(grid, boundary):
         gg = grids[idx]
         tt = intersections[idx]
         for g, t in zip(gg[1:], tt[1:]):
-            pmvc = flopy.plot.PlotMapView(
-                modelgrid=g, ax=ax, extent=extent
-            )
-            pmvc.plot_array(
-                t, masked_values=(0,), cmap=intersection_cmap
-            )
+            pmvc = flopy.plot.PlotMapView(modelgrid=g, ax=ax, extent=extent)
+            pmvc.plot_array(t, masked_values=(0,), cmap=intersection_cmap)
             pmvc.plot_grid(**grid_dict)
         # plot lgr polyline
diff --git a/.docs/Notebooks/groundwater_paper_example_1.py b/.docs/Notebooks/groundwater_paper_example_1.py
index 4afe99585a..34cb627eae 100644
--- a/.docs/Notebooks/groundwater_paper_example_1.py
+++ b/.docs/Notebooks/groundwater_paper_example_1.py
@@ -48,9 +48,7 @@
 # The discretization of the model is specified with the discretization file (DIS) of MODFLOW. The aquifer is divided into 201 cells of length 10 m and width 1 m. The first input of the discretization package is the name of the model object. All other input arguments are self explanatory.
-fpm.ModflowDis(
-    model, nlay=1, nrow=1, ncol=201, delr=10, delc=1, top=50, botm=0
-)
+fpm.ModflowDis(model, nlay=1, nrow=1, ncol=201, delr=10, delc=1, top=50, botm=0)
 # Active cells and the like are defined with the Basic package (BAS), which is required for every MODFLOW model. It contains the {\tt ibound} array, which is used to specify which cells are active (value is positive), inactive (value is 0), or fixed head (value is negative). The {\tt numpy} package (aliased as {\tt np}) can be used to quickly initialize the {\tt ibound} array with values of 1, and then set the {\tt ibound} value for the first and last columns to -1. The {\tt numpy} package (and Python, in general) uses zero-based indexing and supports negative indexing so that row 1 and column 1, and row 1 and column 201, can be referenced as [0, 0], and [0, -1], respectively. Although this simulation is for steady flow, starting heads still need to be specified. They are used as the head for fixed-head cells (where {\tt ibound} is negative), and as a starting point to compute the saturated thickness for cases of unconfined flow.
diff --git a/.docs/Notebooks/groundwater_paper_uspb_example.py b/.docs/Notebooks/groundwater_paper_uspb_example.py
index 1fa202af4a..690f6ef27b 100644
--- a/.docs/Notebooks/groundwater_paper_uspb_example.py
+++ b/.docs/Notebooks/groundwater_paper_uspb_example.py
@@ -109,21 +109,15 @@
 # +
 hedObj = flopy.utils.HeadFile(os.path.join(ws, "DG.hds"), precision="double")
 h = hedObj.get_data(kstpkper=(0, 0))
-cbcObj = flopy.utils.CellBudgetFile(
-    os.path.join(ws, "DG.cbc"), precision="double"
-)
+cbcObj = flopy.utils.CellBudgetFile(os.path.join(ws, "DG.cbc"), precision="double")
 frf = cbcObj.get_data(kstpkper=(0, 0), text="FLOW RIGHT FACE")[0]
 fff = cbcObj.get_data(kstpkper=(0, 0), text="FLOW FRONT FACE")[0]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
-    (frf, fff, None), ml
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge((frf, fff, None), ml)
 # +
 cnt = np.arange(1200, 1700, 100)
-f, (ax1, ax2) = plt.subplots(
-    1, 2, figsize=(6.75, 4.47), constrained_layout=True
-)
+f, (ax1, ax2) = plt.subplots(1, 2, figsize=(6.75, 4.47), constrained_layout=True)
 ax1.set_xlim(0, xmax)
 ax1.set_ylim(0, ymax)
 ax2.set_xlim(0, xmax)
@@ -177,9 +171,7 @@
 cb = plt.colorbar(h2, cax=ax3)
 cb.ax.set_ylabel("Simulated head, m")
-ax1.plot(
-    [-10000, 0], [-10000, 0], color="purple", lw=0.75, label="STR reaches"
-)
+ax1.plot([-10000, 0], [-10000, 0], color="purple", lw=0.75, label="STR reaches")
 ax1.plot(
     [-10000],
     [-10000],
@@ -193,9 +185,7 @@
 leg = ax1.legend(loc="upper left", numpoints=1, prop={"size": 6})
 leg.draw_frame(False)
-ax1.text(
-    0.0, 1.01, "Model layer 4", ha="left", va="bottom", transform=ax1.transAxes
-)
+ax1.text(0.0, 1.01, "Model layer 4", ha="left", va="bottom", transform=ax1.transAxes)
 ax2.text(
     0.98,
     0.02,
@@ -204,9 +194,7 @@
     va="bottom",
     transform=ax2.transAxes,
 )
-ax2.text(
-    0.0, 1.01, "Model layer 5", ha="left", va="bottom", transform=ax2.transAxes
-)
+ax2.text(0.0, 1.01, "Model layer 5", ha="left", va="bottom", transform=ax2.transAxes)
 plt.savefig(os.path.join(ws, "uspb_heads.png"), dpi=300)
 # -
diff --git a/.docs/Notebooks/mf6_complex_model_example.py b/.docs/Notebooks/mf6_complex_model_example.py
index f999b00d96..01bdc9b3e0 100644
--- a/.docs/Notebooks/mf6_complex_model_example.py
+++ b/.docs/Notebooks/mf6_complex_model_example.py
@@ -112,9 +112,7 @@
 )
 # initial conditions
-ic = flopy.mf6.ModflowGwfic(
-    gwf, pname="ic", strt=50.0, filename=f"{model_name}.ic"
-)
+ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=50.0, filename=f"{model_name}.ic")
 # node property flow
 npf = flopy.mf6.ModflowGwfnpf(
     gwf,
@@ -143,9 +141,7 @@
 for layer in range(0, 3):
     sy[layer]["data"] = 0.2
-ss = flopy.mf6.ModflowGwfsto.ss.empty(
-    gwf, layered=True, default_value=0.000001
-)
+ss = flopy.mf6.ModflowGwfsto.ss.empty(gwf, layered=True, default_value=0.000001)
 sto = flopy.mf6.ModflowGwfsto(
     gwf,
@@ -296,9 +292,7 @@
 obs_recarray = {
     "head_obs.csv": [("h1_13_8", "HEAD", (2, 12, 7))],
-    "intercell_flow_obs1.csv": [
-        ("ICF1_1.0", "FLOW-JA-FACE", (0, 4, 5), (0, 5, 5))
-    ],
+    "intercell_flow_obs1.csv": [("ICF1_1.0", "FLOW-JA-FACE", (0, 4, 5), (0, 5, 5))],
     "head-hydrographs.csv": [
         ("h3-13-9", "HEAD", (2, 12, 8)),
         ("h3-12-8", "HEAD", (2, 11, 7)),
diff --git a/.docs/Notebooks/mf6_data_tutorial01.py b/.docs/Notebooks/mf6_data_tutorial01.py
index 54e3d58a96..989b3f56d9 100644
--- a/.docs/Notebooks/mf6_data_tutorial01.py
+++ b/.docs/Notebooks/mf6_data_tutorial01.py
@@ -51,18 +51,14 @@
 # set up simulation and basic packages
 sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=workspace)
-flopy.mf6.ModflowTdis(
-    sim, nper=10, perioddata=[[365.0, 1, 1.0] for _ in range(10)]
-)
+flopy.mf6.ModflowTdis(sim, nper=10, perioddata=[[365.0, 1, 1.0] for _ in range(10)])
 flopy.mf6.ModflowIms(sim)
 gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
 botm = [30.0, 20.0, 10.0]
 flopy.mf6.ModflowGwfdis(gwf, nlay=3, nrow=4, ncol=5, top=50.0, botm=botm)
 flopy.mf6.ModflowGwfic(gwf)
 flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True)
-flopy.mf6.ModflowGwfchd(
-    gwf, stress_period_data=[[(0, 0, 0), 1.0], [(2, 3, 4), 0.0]]
-)
+flopy.mf6.ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(2, 3, 4), 0.0]])
 budget_file = f"{name}.bud"
 head_file = f"{name}.hds"
 flopy.mf6.ModflowGwfoc(
diff --git a/.docs/Notebooks/mf6_data_tutorial09.py b/.docs/Notebooks/mf6_data_tutorial09.py
index e2e3466232..30dbea39f1 100644
--- a/.docs/Notebooks/mf6_data_tutorial09.py
+++ b/.docs/Notebooks/mf6_data_tutorial09.py
@@ -55,9 +55,7 @@
 # set up first groundwater flow model
 name_1 = "ex_1_mod_1"
 model_nam_file = f"{name_1}.nam"
-gwf = flopy.mf6.ModflowGwf(
-    sim, modelname=name_1, model_nam_file=model_nam_file
-)
+gwf = flopy.mf6.ModflowGwf(sim, modelname=name_1, model_nam_file=model_nam_file)
 # create the discretization package
 bot = [-10.0, -50.0, -200.0]
 delrow = delcol = 4.0
@@ -116,9 +114,7 @@
 # set up second groundwater flow model with a finer grid
 name_1 = "ex_1_mod_2"
 model_nam_file = f"{name_1}.nam"
-gwf_2 = flopy.mf6.ModflowGwf(
-    sim, modelname=name_1, model_nam_file=model_nam_file
-)
+gwf_2 = flopy.mf6.ModflowGwf(sim, modelname=name_1, model_nam_file=model_nam_file)
 # create the flopy iterative model solver (ims) package object
 # by default flopy will register both models with the ims package.
 ims = flopy.mf6.modflow.mfims.ModflowIms(
diff --git a/.docs/Notebooks/mf6_lgr_tutorial01.py b/.docs/Notebooks/mf6_lgr_tutorial01.py
index 172756ccfe..1e4f3d168a 100644
--- a/.docs/Notebooks/mf6_lgr_tutorial01.py
+++ b/.docs/Notebooks/mf6_lgr_tutorial01.py
@@ -577,12 +577,8 @@
 # pmvc.plot_array(head[1], vmin=0., vmax=1.)
 # contour head
-cs = pmvp.contour_array(
-    head[0], levels=np.linspace(0, 1), masked_values=[1.0e30]
-)
-cs = pmvc.contour_array(
-    head[1], levels=np.linspace(0, 1), masked_values=[1.0e30]
-)
+cs = pmvp.contour_array(head[0], levels=np.linspace(0, 1), masked_values=[1.0e30])
+cs = pmvc.contour_array(head[1], levels=np.linspace(0, 1), masked_values=[1.0e30])
 # color flood concentrations
 a1 = conc[0]
diff --git a/.docs/Notebooks/mf6_mnw2_tutorial01.py b/.docs/Notebooks/mf6_mnw2_tutorial01.py
index 6f09a523a6..3703512e16 100644
--- a/.docs/Notebooks/mf6_mnw2_tutorial01.py
+++ b/.docs/Notebooks/mf6_mnw2_tutorial01.py
@@ -41,9 +41,7 @@
 model_ws = temp_dir.name
 m = flopy.modflow.Modflow("mnw2example", model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
-    nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m
-)
+dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)
 # -
 # ### MNW2 information by node
@@ -178,8 +176,7 @@
 mnw2.write_file(os.path.join(model_ws, "test.mnw2"))
 junk = [
-    print(l.strip("\n"))
-    for l in open(os.path.join(model_ws, "test.mnw2")).readlines()
+    print(l.strip("\n")) for l in open(os.path.join(model_ws, "test.mnw2")).readlines()
 ]
 # ### Load some example MNW2 packages
@@ -203,9 +200,7 @@
 path = os.path.join("..", "..", "examples", "data", "mnw2_examples")
 m = flopy.modflow.Modflow("br", model_ws=model_ws)
-mnw2 = flopy.modflow.ModflowMnw2.load(
-    os.path.join(path, "BadRiver_cal.mnw2"), m
-)
+mnw2 = flopy.modflow.ModflowMnw2.load(os.path.join(path, "BadRiver_cal.mnw2"), m)
 df = pd.DataFrame(mnw2.node_data)
 df.loc[:, df.sum(axis=0) != 0]
diff --git a/.docs/Notebooks/mf6_output_tutorial01.py b/.docs/Notebooks/mf6_output_tutorial01.py
index 96bc0d1f8a..4044c1689e 100644
--- a/.docs/Notebooks/mf6_output_tutorial01.py
+++ b/.docs/Notebooks/mf6_output_tutorial01.py
@@ -34,9 +34,7 @@
 exe_name = "mf6"
 project_root_path = Path.cwd().parent.parent
 ws = os.path.abspath(os.path.dirname(""))
-sim_ws = str(
-    project_root_path / "examples" / "data" / "mf6" / "test001e_UZF_3lay"
-)
+sim_ws = str(project_root_path / "examples" / "data" / "mf6" / "test001e_UZF_3lay")
 # load the model
 sim = flopy.mf6.MFSimulation.load(
diff --git a/.docs/Notebooks/mf6_parallel_model_splitting_example.py b/.docs/Notebooks/mf6_parallel_model_splitting_example.py
index a753f1fcb5..2e90eac30d 100644
--- a/.docs/Notebooks/mf6_parallel_model_splitting_example.py
+++ b/.docs/Notebooks/mf6_parallel_model_splitting_example.py
@@ -384,9 +384,7 @@ def string2geom(geostring, conversion=None):
         if idomain[r, c] < 1:
             continue
         conductance = leakance * dx * dy
-        gw_discharge_data.append(
-            (0, r, c, modelgrid.top[r, c] - 0.5, conductance, 1.0)
-        )
+        gw_discharge_data.append((0, r, c, modelgrid.top[r, c] - 0.5, conductance, 1.0))
 gw_discharge_data[:10]
 # -
@@ -498,9 +496,7 @@ def string2geom(geostring, conversion=None):
 # Plot the model results
 # +
-water_table = flopy.utils.postprocessing.get_water_table(
-    gwf.output.head().get_data()
-)
+water_table = flopy.utils.postprocessing.get_water_table(gwf.output.head().get_data())
 heads = gwf.output.head().get_data()
 hmin, hmax = water_table.min(), water_table.max()
 contours = np.arange(0, 100, 10)
diff --git a/.docs/Notebooks/mf6_support_example.py b/.docs/Notebooks/mf6_support_example.py
index 9a23b2b344..c2c6cb7ae0 100644
--- a/.docs/Notebooks/mf6_support_example.py
+++ b/.docs/Notebooks/mf6_support_example.py
@@ -172,9 +172,7 @@
     flopy.mf6.data.mfdatastorage.DataStorageType.internal_array,
     flopy.mf6.data.mfdatastorage.DataStorageType.internal_constant,
 ]
-k_template = flopy.mf6.ModflowGwfnpf.k.empty(
-    model, True, layer_storage_types, 100.0
-)
+k_template = flopy.mf6.ModflowGwfnpf.k.empty(model, True, layer_storage_types, 100.0)
 # change the value of the second layer to 50.0
 k_template[0]["data"] = [
     65.0,
@@ -392,9 +390,7 @@
     0: {"filename": "chd_sp1.dat", "data": [[(0, 0, 0), 70.0]]},
     1: [[(0, 0, 0), 60.0]],
 }
-chd = flopy.mf6.ModflowGwfchd(
-    model, maxbound=1, stress_period_data=stress_period_data
-)
+chd = flopy.mf6.ModflowGwfchd(model, maxbound=1, stress_period_data=stress_period_data)
 # ## Packages that Support both List-based and Array-based Data
 #
@@ -568,9 +564,7 @@
 # Data can be modified in several ways. One way is to set data for a given layer within a LayerStorage object, like the one accessed in the code above. Another way is to set the data attribute to the new data. Yet another way is to call the data object's set_data method.
 # set data within a LayerStorage object
-hk_layer_one.set_data(
-    [120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0]
-)
+hk_layer_one.set_data([120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0])
 print(f"New HK data no factor:\n{hk.get_data()}\n")
 # set data attribute to new data
 ic_package.strt = 150.0
diff --git a/.docs/Notebooks/mf6_tutorial01.py b/.docs/Notebooks/mf6_tutorial01.py
index b765d7f963..e399c1e44c 100644
--- a/.docs/Notebooks/mf6_tutorial01.py
+++ b/.docs/Notebooks/mf6_tutorial01.py
@@ -318,9 +318,7 @@
 #
 # First extract the `FLOW-JA-FACE` array from the cell-by-cell budget file
-flowja = gwf.oc.output.budget().get_data(text="FLOW-JA-FACE", kstpkper=(0, 0))[
-    0
-]
+flowja = gwf.oc.output.budget().get_data(text="FLOW-JA-FACE", kstpkper=(0, 0))[0]
 # Next extract the flow residual. The MODFLOW 6 binary grid file is passed
 # into the function because it contains the ia array that defines
diff --git a/.docs/Notebooks/mf_tutorial02.py b/.docs/Notebooks/mf_tutorial02.py
index 96a3fcd589..345d329aab 100644
--- a/.docs/Notebooks/mf_tutorial02.py
+++ b/.docs/Notebooks/mf_tutorial02.py
@@ -183,9 +183,7 @@
     "print head",
     "print budget",
 ]
-oc = flopy.modflow.ModflowOc(
-    mf, stress_period_data=stress_period_data, compact=True
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=stress_period_data, compact=True)
 # ## Running the Model
 #
diff --git a/.docs/Notebooks/mfusg_conduit_examples.py b/.docs/Notebooks/mfusg_conduit_examples.py
index 187cabd628..b0741a7d44 100644
--- a/.docs/Notebooks/mfusg_conduit_examples.py
+++ b/.docs/Notebooks/mfusg_conduit_examples.py
@@ -42,9 +42,7 @@
 # A vertical conduit well is located at the center of the domain and has a radius of 0.5 m. The well pumps 62,840 m3/d and is open fully to both aquifers from top to bottom. The CLN Process was used with a circular conduit geometry type to discretize the well bore with two conduit cells, one in each layer. The WEL Package was used to pump from the bottom CLN cell.
 #
-model_ws = os.path.join(
-    "../../examples/data/mfusg_test", "03_conduit_confined"
-)
+model_ws = os.path.join("../../examples/data/mfusg_test", "03_conduit_confined")
 mf = flopy.mfusg.MfUsg.load(
     "ex3.nam", model_ws=model_ws, exe_name="mfusg", check=False, verbose=True
 )
@@ -58,9 +56,7 @@
     for j in range(mf.dis.nstp[i]):
         spd[(i, j)] = ["save head", "save budget"]
-oc = flopy.modflow.ModflowOc(
-    mf, stress_period_data=spd, unitnumber=[22, 30, 31, 50]
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, unitnumber=[22, 30, 31, 50])
 # +
 model_ws = os.path.join(cln_ws, "ex03")
@@ -109,9 +105,7 @@
 # +
 simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
 for i in range(nper - 1):
-    simflow = np.append(
-        simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
-    )
+    simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
 simflow1 = simflow[simflow["node"] == 1]["q"]
 simflow2 = simflow[simflow["node"] == 2]["q"]
@@ -303,9 +297,7 @@
 # +
 simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
 for i in range(nper - 1):
-    simflow = np.append(
-        simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
-    )
+    simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
 flow_case1 = simflow
 # -
@@ -399,9 +391,7 @@
 # +
 simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
 for i in range(nper - 1):
-    simflow = np.append(
-        simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
-    )
+    simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
 flow_case2 = simflow
 # -
@@ -498,9 +488,7 @@
 # +
 simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
 for i in range(nper - 1):
-    simflow = np.append(
-        simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
-    )
+    simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
 flow_case3 = simflow
 # -
@@ -584,9 +572,7 @@
 # +
 simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0]
 for i in range(nper - 1):
-    simflow = np.append(
-        simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0]
-    )
+    simflow = np.append(simflow, cbb.get_data(kstpkper=(i + 1, 0), text="GWF")[0])
 flow_case4 = simflow
 # -
diff --git a/.docs/Notebooks/mfusg_freyberg_example.py b/.docs/Notebooks/mfusg_freyberg_example.py
index 7f92e40f8f..e4ad7797f0 100644
--- a/.docs/Notebooks/mfusg_freyberg_example.py
+++ b/.docs/Notebooks/mfusg_freyberg_example.py
@@ -185,9 +185,7 @@
         masked_values=[-999.99],
         alpha=0.4,
     )
-    contours = xsect.contour_array(
-        head2, levels=levels, alpha=1.0, colors="blue"
-    )
+    contours = xsect.contour_array(head2, levels=levels, alpha=1.0, colors="blue")
     xsect.plot_inactive(ibound=ibound, color_noflow=(0.8, 0.8, 0.8))
     xsect.plot_grid(alpha=0.2)
     ax.set_ylim([0, 40])  # set y axis range to ignore low elevations
diff --git a/.docs/Notebooks/mfusg_zaidel_example.py b/.docs/Notebooks/mfusg_zaidel_example.py
index 61d961412b..477e184948 100644
--- a/.docs/Notebooks/mfusg_zaidel_example.py
+++ b/.docs/Notebooks/mfusg_zaidel_example.py
@@ -167,9 +167,7 @@
     left=None, bottom=None, right=None, top=None, wspace=0.25, hspace=0.25
 )
 ax = fig.add_subplot(1, 1, 1)
-ax.plot(
-    x, mfusghead[0, 0, :], linewidth=0.75, color="blue", label="MODFLOW-USG"
-)
+ax.plot(x, mfusghead[0, 0, :], linewidth=0.75, color="blue", label="MODFLOW-USG")
 ax.fill_between(x, y1=botm[1, 0, :], y2=-5, color="0.5", alpha=0.5)
 leg = ax.legend(loc="upper right")
 leg.draw_frame(False)
diff --git a/.docs/Notebooks/modelgrid_examples.py b/.docs/Notebooks/modelgrid_examples.py
index 53f711cb63..acba38b8b5 100644
--- a/.docs/Notebooks/modelgrid_examples.py
+++ b/.docs/Notebooks/modelgrid_examples.py
@@ -54,9 +54,7 @@
 # +
 # set paths to each of our model types for this example notebook
-spth = os.path.join(
-    "..", "..", "examples", "data", "freyberg_multilayer_transient"
-)
+spth = os.path.join("..", "..", "examples", "data", "freyberg_multilayer_transient")
 spth6 = os.path.join("..", "..", "examples", "data", "mf6-freyberg")
 vpth = os.path.join("..", "..", "examples", "data")
 upth = os.path.join("..", "..", "examples", "data")
@@ -137,9 +135,7 @@
 epsg = modelgrid.epsg
 proj4 = modelgrid.proj4
-print(
-    f"xoff: {xoff}\nyoff: {yoff}\nangrot: {angrot}\nepsg: {epsg}\nproj4: {proj4}"
-)
+print(f"xoff: {xoff}\nyoff: {yoff}\nangrot: {angrot}\nepsg: {epsg}\nproj4: {proj4}")
 # -
 # #### Setting modelgrid reference information
@@ -422,9 +418,7 @@
 # +
 # simple functions to load vertices and indice lists
 def load_verts(fname):
-    verts = np.genfromtxt(
-        fname, dtype=[int, float, float], names=["iv", "x", "y"]
-    )
+    verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
     verts["iv"] -= 1  # zero based
     return verts
@@ -554,9 +548,7 @@ def load_iverts(fname):
 # +
 # load a modflow-6 freyberg simulation
-sim = flopy.mf6.MFSimulation.load(
-    sim_ws=spth6, verbosity_level=0, exe_name=mf6_exe
-)
+sim = flopy.mf6.MFSimulation.load(sim_ws=spth6, verbosity_level=0, exe_name=mf6_exe)
 # get a model object from the simulation
 ml = sim.get_model("freyberg")
@@ -679,9 +671,7 @@ def load_iverts(fname):
 # plot arrays
 for ix, ax in enumerate(axs):
     pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax)
-    pc = pmv.plot_array(
-        arrays[ix], masked_values=[1e30], vmin=0, vmax=35, alpha=0.5
-    )
+    pc = pmv.plot_array(arrays[ix], masked_values=[1e30], vmin=0, vmax=35, alpha=0.5)
     pmv.plot_grid()
     pmv.plot_inactive()
     ax.set_title(f"Modelgrid: {labels[ix]}")
@@ -818,9 +808,7 @@ def load_iverts(fname):
 # create a mf6 WEL package and add it to the existing model
 stress_period_data = {0: pdata}
-wel = flopy.mf6.modflow.ModflowGwfwel(
-    ml, stress_period_data=stress_period_data
-)
+wel = flopy.mf6.modflow.ModflowGwfwel(ml, stress_period_data=stress_period_data)
 # plot the locations from the new WEL package on the modelgrid
 fig, ax = plt.subplots(figsize=(10, 10), subplot_kw={"aspect": "equal"})
diff --git a/.docs/Notebooks/modpath6_example.py b/.docs/Notebooks/modpath6_example.py
index f0bfbc6f00..5620b023d2 100644
--- a/.docs/Notebooks/modpath6_example.py
+++ b/.docs/Notebooks/modpath6_example.py
@@ -146,9 +146,7 @@
 fpth = os.path.join(model_ws, "starting_locs.shp")
 print(type(fpth))
-epobj.write_shapefile(
-    well_epd, direction="starting", shpname=fpth, mg=m.modelgrid
-)
+epobj.write_shapefile(well_epd, direction="starting", shpname=fpth, mg=m.modelgrid)
 # Read in the pathline file and subset to pathlines that terminated in the well .
@@ -294,9 +292,7 @@
 pthobj = flopy.utils.PathlineFile(os.path.join(model_ws, "ex6mnw.mppth"))
 epdobj = flopy.utils.EndpointFile(os.path.join(model_ws, "ex6mnw.mpend"))
 well_epd = epdobj.get_alldata()
-well_pathlines = (
-    pthobj.get_alldata()
-)  # returns a list of recarrays; one per pathline
+well_pathlines = pthobj.get_alldata()  # returns a list of recarrays; one per pathline
 # +
 fig = plt.figure(figsize=(8, 8))
@@ -311,9 +307,7 @@
 )
 plt.clabel(contour_set, inline=1, fontsize=14)
-mapview.plot_pathline(
-    well_pathlines, travel_time="<10000", layer="all", colors="red"
-)
+mapview.plot_pathline(well_pathlines, travel_time="<10000", layer="all", colors="red")
 # -
 try:
diff --git a/.docs/Notebooks/modpath7_create_simulation_example.py b/.docs/Notebooks/modpath7_create_simulation_example.py
index f5f0975dde..9a779d0b81 100644
--- a/.docs/Notebooks/modpath7_create_simulation_example.py
+++ b/.docs/Notebooks/modpath7_create_simulation_example.py
@@ -81,9 +81,7 @@ def get_nodes(locs):
 nm = "ex01_mf6"
 # Create the Flopy simulation object
-sim = flopy.mf6.MFSimulation(
-    sim_name=nm, exe_name=mfexe, version="mf6", sim_ws=ws
-)
+sim = flopy.mf6.MFSimulation(sim_name=nm, exe_name=mfexe, version="mf6", sim_ws=ws)
 # Create the Flopy temporal discretization object
 pd = (perlen, nstp, tsmult)
@@ -133,9 +131,7 @@ def get_nodes(locs):
 flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch)
 # wel
 wd = [(wel_loc, wel_q)]
-flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(
-    gwf, maxbound=1, stress_period_data={0: wd}
-)
+flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(gwf, maxbound=1, stress_period_data={0: wd})
 # river
 rd = []
 for i in range(nrow):
@@ -258,9 +254,7 @@ def get_nodes(locs):
 colors = ["green", "orange", "red"]
 # +
-f, axes = plt.subplots(
-    ncols=3, nrows=2, sharey=True, sharex=True, figsize=(15, 10)
-)
+f, axes = plt.subplots(ncols=3, nrows=2, sharey=True, sharex=True, figsize=(15, 10))
 axes = axes.flatten()
 idax = 0
diff --git a/.docs/Notebooks/mt3d-usgs_example.py b/.docs/Notebooks/mt3d-usgs_example.py
index 008e285b36..8ab2167fb3 100644
--- a/.docs/Notebooks/mt3d-usgs_example.py
+++ b/.docs/Notebooks/mt3d-usgs_example.py
@@ -389,9 +389,7 @@ def calc_strtElev(X, Y):
     "CrnkNic.gag5",
     "CrnkNic.gag6",
 ]
-gage = flopy.modflow.ModflowGage(
-    mf, numgage=6, gage_data=gages, filenames=files
-)
+gage = flopy.modflow.ModflowGage(mf, numgage=6, gage_data=gages, filenames=files)
 # Instantiate linkage with mass transport routing (LMT) package for MODFLOW-NWT (generates linker file)
@@ -589,7 +587,9 @@ def load_ts_from_otis(fname, iobs=1):
 ts5_mt3d = load_ts_from_SFT_output(fname_SFTout, nd=619)
 # OTIS results located here
-fname_OTIS = "../../examples/data/mt3d_test/mfnwt_mt3dusgs/sft_crnkNic/OTIS_solution.out"
+fname_OTIS = (
+    "../../examples/data/mt3d_test/mfnwt_mt3dusgs/sft_crnkNic/OTIS_solution.out"
+)
 # Loading OTIS output
 ts1_Otis = load_ts_from_otis(fname_OTIS, 1)
diff --git a/.docs/Notebooks/mt3dms_examples.py b/.docs/Notebooks/mt3dms_examples.py
index 77996876db..4c2cd38625 100644
--- a/.docs/Notebooks/mt3dms_examples.py
+++ b/.docs/Notebooks/mt3dms_examples.py
@@ -1314,9 +1314,7 @@ def p08(dirname, mixelm):
 mx.plot_array(hk, masked_values=[hk[0, 0, 0]], alpha=0.2)
 mx.plot_ibound()
 mx.plot_grid(color="0.5", alpha=0.2)
-cs = mx.contour_array(
-    conc[3], levels=[0.05, 0.1, 0.15, 0.19], masked_values=[1.0e30]
-)
+cs = mx.contour_array(conc[3], levels=[0.05, 0.1, 0.15, 0.19], masked_values=[1.0e30])
 ax.set_title("TIME = 20 YEARS")
@@ -1525,9 +1523,7 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
     nrow = 61
     ncol = 40
     delr = (
-        [2000, 1600, 800, 400, 200, 100]
-        + 28 * [50]
-        + [100, 200, 400, 800, 1600, 2000]
+        [2000, 1600, 800, 400, 200, 100] + 28 * [50] + [100, 200, 400, 800, 1600, 2000]
     )
     delc = (
         [2000, 2000, 2000, 1600, 800, 400, 200, 100]
@@ -1661,9 +1657,7 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
     )
     dsp = flopy.mt3d.Mt3dDsp(mt, al=al, trpt=trpt, trpv=trpv)
     ssm = flopy.mt3d.Mt3dSsm(mt, crch=0.0)
-    rct = flopy.mt3d.Mt3dRct(
-        mt, isothm=isothm, igetsc=0, rhob=1.7, sp1=0.176, sp2=sp2
-    )
+    rct = flopy.mt3d.Mt3dRct(mt, isothm=isothm, igetsc=0, rhob=1.7, sp1=0.176, sp2=sp2)
     mxiter = 1
     if isothm == 4:
         mxiter = 50
@@ -1774,12 +1768,8 @@ def p10(dirname, mixelm, perlen=1000, isothm=1, sp2=0.0, ttsmult=1.2):
 mf, mt, conctvd, cvttvd, mvt0 = p10("p10", 0, perlen=2000, isothm=0)
 mf, mt, conctvd, cvttvd, mvt1 = p10("p10", 0, perlen=2000, isothm=1)
 mf, mt, conctvd, cvttvd, mvt2 = p10("p10", 0, perlen=2000, isothm=4, sp2=0.1)
-mf, mt, conctvd, cvttvd, mvt3 = p10(
-    "p10", 0, perlen=2000, isothm=4, sp2=1.5e-4
-)
-mf, mt, conctvd, cvttvd, mvt4 = p10(
-    "p10", 0, perlen=2000, isothm=4, sp2=1.0e-6
-)
+mf, mt, conctvd, cvttvd, mvt3 = p10("p10", 0, perlen=2000, isothm=4, sp2=1.5e-4)
+mf, mt, conctvd, cvttvd, mvt4 = p10("p10", 0, perlen=2000, isothm=4, sp2=1.0e-6)
 fig = plt.figure(figsize=(10, 8))
 ax = fig.add_subplot(1, 1, 1)
diff --git a/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py b/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
index a743fb31c4..55ad5251f9 100644
--- a/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
+++ b/.docs/Notebooks/mt3dms_sft_lkt_uzt_tutorial.py
@@ -303,9 +303,7 @@
         0, 300, 1
     ):  # These indices need to be adjusted for 0-based foolishness
         # Skipping cells not satisfying the conditions below
-        if (i == 1 and (j < 27 or j > 31)) or (
-            i == 299 and (j < 26 or j > 31)
-        ):
+        if (i == 1 and (j < 27 or j > 31)) or (i == 299 and (j < 26 or j > 31)):
             if i % 2 == 0:
                 sp.append(
                     [
@@ -515,9 +513,7 @@
 ]
 numgage = len(gages)
-gage = flopy.modflow.ModflowGage(
-    mf, numgage=numgage, gage_data=gages, filenames=files
-)
+gage = flopy.modflow.ModflowGage(mf, numgage=numgage, gage_data=gages, filenames=files)
 # -
 # ### Instantiate Unsaturated-Zone Flow (UZF) package for MODFLOW-NWT
@@ -628,9 +624,7 @@
 # Create a dictionary, 1 entry for each of the two stress periods.
 stress_period_data = {0: stress_period_data, 1: stress_period_data}
-drn = flopy.modflow.ModflowDrn(
-    mf, ipakcb=ipakcb, stress_period_data=stress_period_data
-)
+drn = flopy.modflow.ModflowDrn(mf, ipakcb=ipakcb, stress_period_data=stress_period_data)
 # -
 # ### Instantiate linkage with mass transport routing (LMT) package for MODFLOW-NWT (generates linker file)
@@ -712,9 +706,7 @@
 mxpart = 5000
 nadvfd = 1  # (1 = Upstream weighting)
-adv = flopy.mt3d.Mt3dAdv(
-    mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd
-)
+adv = flopy.mt3d.Mt3dAdv(mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd)
 # -
 # ### Instantiate generalized conjugate gradient solver (GCG) package for MT3D-USGS
@@ -748,9 +740,7 @@
 trpv = 0.1  # ratio of the vertical transverse dispersitvity to 'AL'
 dmcoef = 1.0000e-10
-dsp = flopy.mt3d.Mt3dDsp(
-    mt, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef, multiDiff=True
-)
+dsp = flopy.mt3d.Mt3dDsp(mt, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef, multiDiff=True)
 # -
 # ### Instantiate source-sink mixing (SSM) package for MT3D-USGS
diff --git a/.docs/Notebooks/nwt_option_blocks_tutorial.py b/.docs/Notebooks/nwt_option_blocks_tutorial.py
index 0bd0d45645..e3440cc1e1 100644
--- a/.docs/Notebooks/nwt_option_blocks_tutorial.py
+++ b/.docs/Notebooks/nwt_option_blocks_tutorial.py
@@ -103,9 +103,7 @@
 # And let's load the new UZF file
-uzf2 = flopy.modflow.ModflowUzf1.load(
-    os.path.join(model_ws, uzf_name), ml, check=False
-)
+uzf2 = flopy.modflow.ModflowUzf1.load(os.path.join(model_ws, uzf_name), ml, check=False)
 # ### Now we can look at the options object, and check if it's block or line format
 #
@@ -121,9 +119,7 @@
 uzf2.write_file(os.path.join(model_ws, uzf_name))
 ml.remove_package("UZF")
-uzf3 = flopy.modflow.ModflowUzf1.load(
-    os.path.join(model_ws, uzf_name), ml, check=False
-)
+uzf3 = flopy.modflow.ModflowUzf1.load(os.path.join(model_ws, uzf_name), ml, check=False)
 print("\n")
 print(uzf3.options)
 print(uzf3.options.block)
diff --git a/.docs/Notebooks/pest_tutorial01.py b/.docs/Notebooks/pest_tutorial01.py
index da39e76560..8b591c5f75 100644
--- a/.docs/Notebooks/pest_tutorial01.py
+++ b/.docs/Notebooks/pest_tutorial01.py
@@ -69,9 +69,7 @@
 ubound = 1000.0
 transform = "log"
-p = flopy.pest.Params(
-    mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
 # -
 # At this point, we have enough information to the write a PEST template file for the LPF package. We can do this using the following statement:
@@ -101,9 +99,7 @@
 ubound = 1000.0
 transform = "log"
-p = flopy.pest.Params(
-    mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
 tw = flopy.pest.templatewriter.TemplateWriter(m, [p])
 tw.write_template()
 # -
@@ -193,9 +189,7 @@
 # For a recharge multiplier, span['idx'] must be None
 idx = None
 span = {"kpers": [0, 1, 2], "idx": idx}
-p = flopy.pest.Params(
-    mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
 plist.append(p)
 # -
@@ -224,9 +218,7 @@
 # For a recharge multiplier, span['idx'] must be None
 span = {"kpers": [1, 2], "idx": None}
-p = flopy.pest.Params(
-    mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
 plist.append(p)
 # +
@@ -243,9 +235,7 @@
 idx = np.empty((nrow, ncol), dtype=bool)
 idx[0:3, 0:3] = True
 span = {"kpers": [1], "idx": idx}
-p = flopy.pest.Params(
-    mfpackage, partype, parname, startvalue, lbound, ubound, span
-)
+p = flopy.pest.Params(mfpackage, partype, parname, startvalue, lbound, ubound, span)
 plist.append(p)
 # +
diff --git a/.docs/Notebooks/plot_cross_section_example.py b/.docs/Notebooks/plot_cross_section_example.py
index d5d2ef381b..f80b20632e 100644
--- a/.docs/Notebooks/plot_cross_section_example.py
+++ b/.docs/Notebooks/plot_cross_section_example.py
@@ -181,9 +181,7 @@
 csa = xsect.plot_array(a)
 patches = xsect.plot_ibound()
 linecollection = xsect.plot_grid()
-t = ax.set_title(
-    "Column 6 Cross-Section with Horizontal hydraulic conductivity"
-)
+t = ax.set_title("Column 6 Cross-Section with Horizontal hydraulic conductivity")
 cb = plt.colorbar(csa, shrink=0.75)
 # + [markdown] pycharm={"name": "#%% md\n"}
@@ -459,9 +457,7 @@
 csa = xsect.plot_array(a)
 patches = xsect.plot_ibound()
 linecollection = xsect.plot_grid()
-t = ax.set_title(
-    "Column 6 Cross-Section with Horizontal hydraulic conductivity"
-)
+t = ax.set_title("Column 6 Cross-Section with Horizontal hydraulic conductivity")
 cb = plt.colorbar(csa, shrink=0.75)
 # + [markdown] pycharm={"name": "#%% md\n"}
@@ -479,9 +475,7 @@
 cbc_file = os.path.join(modelpth, "freyberg.cbc")
 cbc = flopy.utils.CellBudgetFile(cbc_file, precision="double")
 spdis = cbc.get_data(text="SPDIS")[-1]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
-    spdis, ml6, head=head
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, ml6, head=head)
 fig = plt.figure(figsize=(18, 5))
 ax = fig.add_subplot(1, 1, 1)
@@ -696,9 +690,7 @@ def run_vertex_grid_example(ws):
     # riv
     riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
     rivcells = g.intersect(riverline, "line", 0)
-    rivspd = [
-        [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
-    ]
+    rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
     riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
     # output control
@@ -1198,9 +1190,7 @@ def build_mf6gwt(sim_folder):
         ("GWFHEAD", "../mf6gwf/flow.hds"),
         ("GWFBUDGET", "../mf6gwf/flow.bud"),
     ]
-    flopy.mf6.ModflowGwtfmi(
-        gwt, flow_imbalance_correction=True, packagedata=pd
-    )
+    flopy.mf6.ModflowGwtfmi(gwt, flow_imbalance_correction=True, packagedata=pd)
     sourcerecarray = [
         ("RCH-1", "AUX", "CONCENTRATION"),
     ]
@@ -1239,9 +1229,7 @@ def build_mf6gwt(sim_folder):
             ("obs2", "CONCENTRATION", obs2),
         ],
     }
-    flopy.mf6.ModflowUtlobs(
-        gwt, digits=10, print_input=True, continuous=obs_data
-    )
+    flopy.mf6.ModflowUtlobs(gwt, digits=10, print_input=True, continuous=obs_data)
     return sim
@@ -1322,9 +1310,7 @@ def run_keating_model(ws=example_name, silent=True):
     # set labels using styles
     styles.xlabel(label="x-position (m)")
     styles.ylabel(label="elevation (m)")
-    styles.heading(
-        letter="A.", heading="Simulated hydraulic head", fontsize=10
-    )
+    styles.heading(letter="A.", heading="Simulated hydraulic head", fontsize=10)
     ax.set_aspect(1.0)
 # + [markdown] pycharm={"name": "#%% md\n"}
diff --git a/.docs/Notebooks/plot_map_view_example.py b/.docs/Notebooks/plot_map_view_example.py
index bb79a16e99..950f752727 100644
--- a/.docs/Notebooks/plot_map_view_example.py
+++ b/.docs/Notebooks/plot_map_view_example.py
@@ -378,9 +378,7 @@
 mapview = flopy.plot.PlotMapView(model=ml)
 quadmesh = mapview.plot_ibound()
 quadmesh = mapview.plot_array(head, alpha=0.5)
-quiver = mapview.plot_vector(
-    sqx, sqy
-)  # include the head array for specific discharge
+quiver = mapview.plot_vector(sqx, sqy)  # include the head array for specific discharge
 linecollection = mapview.plot_grid()
@@ -570,9 +568,7 @@
 patch_collection1 = mapview.plot_shapes(cross_section, lw=3, edgecolor="red")
 # plot_point(s)
-patch_collection3 = mapview.plot_shapes(
-    wells, radius=100, facecolor="k", edgecolor="k"
-)
+patch_collection3 = mapview.plot_shapes(wells, radius=100, facecolor="k", edgecolor="k")
 # + [markdown] pycharm={"name": "#%% md\n"}
 # ## Working with MODFLOW-6 models
@@ -880,9 +876,7 @@ def run_vertex_grid_example(ws):
     # riv
     riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]]
     rivcells = g.intersect(riverline, "line", 0)
-    rivspd = [
-        [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]
-    ]
+    rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]]
     riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
     # output control
@@ -1158,9 +1152,7 @@ def run_vertex_grid_example(ws):
 pline = mapview.plot_pathline(p0, layer="all", color="blue", lw=0.75)
 colors = ["green", "orange", "red"]
 for k in range(3):
-    tseries = mapview.plot_timeseries(
-        ts0, layer=k, marker="o", lw=0, color=colors[k]
-    )
+    tseries = mapview.plot_timeseries(ts0, layer=k, marker="o", lw=0, color=colors[k])
 # + [markdown] pycharm={"name": "#%% md\n"}
 # ### Plotting specific discharge vectors for DISV
@@ -1171,9 +1163,7 @@ def run_vertex_grid_example(ws):
     os.path.join(modelpth, "mp7p2.cbb"), precision="double"
 )
 spdis = cbb.get_data(text="SPDIS")[0]
-qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(
-    spdis, vertex_ml6
-)
+qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, vertex_ml6)
 fig = plt.figure(figsize=(12, 12))
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
@@ -1202,9 +1192,7 @@ def run_vertex_grid_example(ws):
 # simple functions to load vertices and incidence lists
 def load_verts(fname):
-    verts = np.genfromtxt(
-        fname, dtype=[int, float, float], names=["iv", "x", "y"]
-    )
+    verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"])
     verts["iv"] -= 1  # zero based
     return verts
@@ -1326,9 +1314,7 @@ def load_iverts(fname):
     plt.colorbar(quadmesh, shrink=0.75)
     # use styles to add a heading, xlabel, ylabel
-    styles.heading(
-        letter="A.", heading="Specific Discharge (" + r"$L/T$" + ")"
-    )
+    styles.heading(letter="A.", heading="Specific Discharge (" + r"$L/T$" + ")")
     styles.xlabel(label="Easting")
     styles.ylabel(label="Northing")
diff --git a/.docs/Notebooks/raster_intersection_example.py b/.docs/Notebooks/raster_intersection_example.py
index 447581faad..2f6c09da85 100644
--- a/.docs/Notebooks/raster_intersection_example.py
+++ b/.docs/Notebooks/raster_intersection_example.py
@@ -92,9 +92,7 @@
 # +
 model_ws = os.path.join("..", "..", "examples", "data", "options", "sagehen")
-ml = flopy.modflow.Modflow.load(
-    "sagehen.nam", version="mfnwt", model_ws=model_ws
-)
+ml = flopy.modflow.Modflow.load("sagehen.nam", version="mfnwt", model_ws=model_ws)
 xoff = 214110
 yoff = 4366620
@@ -132,9 +130,7 @@
 # + `"mean"`, `"median"`, `"min"`, `"max"`, and `"mode"` are a function of the number of grid cells.
 t0 = time.time()
-dem_data = rio.resample_to_grid(
-    ml.modelgrid, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method="nearest")
 resample_time = time.time() - t0
 # +
@@ -143,9 +139,7 @@
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
 pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
-    dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
 plt.title(f"Resample time, nearest neighbor: {resample_time:.3f} sec")
 plt.colorbar(ax, shrink=0.7)
 # -
@@ -162,9 +156,7 @@
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
 pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
-    dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
 plt.title(f"Resample time, bi-linear: {resample_time:.3f} sec")
 plt.colorbar(ax, shrink=0.7)
 # -
@@ -181,9 +173,7 @@
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
 pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
-    dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
 plt.title(f"Resample time, bi-cubic: {resample_time:.3f} sec")
 plt.colorbar(ax, shrink=0.7)
 # -
@@ -203,9 +193,7 @@
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
 pmv = flopy.plot.PlotMapView(modelgrid=ml.modelgrid, ax=ax)
-ax = pmv.plot_array(
-    dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax
-)
+ax = pmv.plot_array(dem_data, masked_values=rio.nodatavals, vmin=vmin, vmax=vmax)
 plt.title(f"Resample time, median: {resample_time:.3f} sec")
 plt.colorbar(ax, shrink=0.7)
 # -
@@ -255,9 +243,7 @@
 # +
 t0 = time.time()
-dem_data = rio.resample_to_grid(
-    mg_unstruct, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="nearest")
 resample_time = time.time() - t0
@@ -279,9 +265,7 @@
 # +
 t0 = time.time()
-dem_data = rio.resample_to_grid(
-    mg_unstruct, band=rio.bands[0], method="linear"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="linear")
 resample_time = time.time() - t0
@@ -434,9 +418,7 @@
 # +
 t0 = time.time()
-dem_data = rio.resample_to_grid(
-    mg_unstruct, band=rio.bands[0], method="nearest"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="nearest")
 resample_time = time.time() - t0
@@ -459,9 +441,7 @@
 # +
 t0 = time.time()
-dem_data = rio.resample_to_grid(
-    mg_unstruct, band=rio.bands[0], method="linear"
-)
+dem_data = rio.resample_to_grid(mg_unstruct, band=rio.bands[0], method="linear")
 resample_time = time.time() - t0
@@ -563,9 +543,7 @@
 ib = pmv.plot_ibound(ibound)
 pmv.plot_grid(linewidth=0.3)
 plt.plot(shape[0], shape[1], "r-")
-plt.title(
-    "Model top and ibound arrays created using bi-linear raster resampling"
-)
+plt.title("Model top and ibound arrays created using bi-linear raster resampling")
 plt.colorbar(ax, shrink=0.7)
 # -
diff --git a/.docs/Notebooks/save_binary_data_file_example.py b/.docs/Notebooks/save_binary_data_file_example.py
index b75469f8ce..032685cdc4 100644
--- a/.docs/Notebooks/save_binary_data_file_example.py
+++ b/.docs/Notebooks/save_binary_data_file_example.py
@@ -51,9 +51,7 @@
 dtype = np.float32  # or np.float64
 mf = flopy.modflow.Modflow(model_ws=model_ws)
-dis = flopy.modflow.ModflowDis(
-    mf, nlay=nlay, nrow=nrow, ncol=ncol, delr=20, delc=10
-)
+dis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, delr=20, delc=10)
 # -
 # Create a linear data array
diff --git a/.docs/Notebooks/seawat_henry_example.py b/.docs/Notebooks/seawat_henry_example.py
index 7307f9015c..3d4de1b0ec 100644
--- a/.docs/Notebooks/seawat_henry_example.py
+++ b/.docs/Notebooks/seawat_henry_example.py
@@ -185,9 +185,7 @@
 fig = plt.figure(figsize=(10, 10))
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
-ax.imshow(
-    concentration[:, 0, :], interpolation="nearest", extent=(0, Lx, 0, Lz)
-)
+ax.imshow(concentration[:, 0, :], interpolation="nearest", extent=(0, Lx, 0, Lz))
 y, x, z = dis.get_node_coordinates()
 X, Z = np.meshgrid(x, z[:, 0, 0])
 iskip = 3
diff --git a/.docs/Notebooks/sfrpackage_example.py b/.docs/Notebooks/sfrpackage_example.py
index 6ce4273e9d..43c63a0540 100644
--- a/.docs/Notebooks/sfrpackage_example.py
+++ b/.docs/Notebooks/sfrpackage_example.py
@@ -242,18 +242,14 @@
 # ### Plot leakage in plan view
-im = plt.imshow(
-    sfrleak[0], interpolation="none", cmap="coolwarm", vmin=-3, vmax=3
-)
+im = plt.imshow(sfrleak[0], interpolation="none", cmap="coolwarm", vmin=-3, vmax=3)
 cb = plt.colorbar(im, label="SFR Leakage, in cubic feet per second")
 # ### Plot total streamflow
 sfrQ = sfrleak[0].copy()
 sfrQ[sfrQ == 0] = np.nan
-sfrQ[df.row.values - 1, df.column.values - 1] = (
-    df[["Qin", "Qout"]].mean(axis=1).values
-)
+sfrQ[df.row.values - 1, df.column.values - 1] = df[["Qin", "Qout"]].mean(axis=1).values
 im = plt.imshow(sfrQ, interpolation="none")
 plt.colorbar(im, label="Streamflow, in cubic feet per second")
diff --git a/.docs/Notebooks/shapefile_feature_examples.py b/.docs/Notebooks/shapefile_feature_examples.py
index 4d0bf9ff76..13f970a428 100644
--- a/.docs/Notebooks/shapefile_feature_examples.py
+++ b/.docs/Notebooks/shapefile_feature_examples.py
@@ -99,9 +99,7 @@
 # +
 from pathlib import Path
-recarray2shp(
-    chk.summary_array, geoms, os.path.join(workspace, "test.shp"), crs=26715
-)
+recarray2shp(chk.summary_array, geoms, os.path.join(workspace, "test.shp"), crs=26715)
 shape_path = os.path.join(workspace, "test.prj")
 # + pycharm={"name": "#%%\n"}
diff --git a/.docs/Notebooks/swi2package_example1.py b/.docs/Notebooks/swi2package_example1.py
index 4241716e6d..6e6745d1fc 100644
--- a/.docs/Notebooks/swi2package_example1.py
+++ b/.docs/Notebooks/swi2package_example1.py
@@ -150,9 +150,7 @@
 hfile = flopy.utils.HeadFile(os.path.join(ml.model_ws, f"{modelname}.hds"))
 head = hfile.get_alldata()
 # read model zeta
-zfile = flopy.utils.CellBudgetFile(
-    os.path.join(ml.model_ws, f"{modelname}.zta")
-)
+zfile = flopy.utils.CellBudgetFile(os.path.join(ml.model_ws, f"{modelname}.zta"))
 kstpkper = zfile.get_kstpkper()
 zeta = []
 for kk in kstpkper:
diff --git a/.docs/Notebooks/swi2package_example2.py b/.docs/Notebooks/swi2package_example2.py
index 4820f321f5..7a8ac4deeb 100644
--- a/.docs/Notebooks/swi2package_example2.py
+++ b/.docs/Notebooks/swi2package_example2.py
@@ -291,9 +291,7 @@
     steady=False,
 )
 bas = flopy.modflow.ModflowBas(m, ibound=swt_ibound, strt=0.05)
-lpf = flopy.modflow.ModflowLpf(
-    m, hk=2.0, vka=2.0, ss=0.0, sy=0.0, laytyp=0, layavg=0
-)
+lpf = flopy.modflow.ModflowLpf(m, hk=2.0, vka=2.0, ss=0.0, sy=0.0, laytyp=0, layavg=0)
 oc = flopy.modflow.ModflowOc(m, save_every=1, save_types=["save head"])
 pcg = flopy.modflow.ModflowPcg(m)
 # Create the MT3DMS model files
@@ -331,9 +329,7 @@
     mxstrn=1e8,
 )
 dsp = flopy.mt3d.Mt3dDsp(m, al=0.0, trpt=1.0, trpv=1.0, dmcoef=0.0)
-gcg = flopy.mt3d.Mt3dGcg(
-    m, mxiter=1, iter1=50, isolve=3, cclose=1e-6, iprgcg=5
-)
+gcg = flopy.mt3d.Mt3dGcg(m, mxiter=1, iter1=50, isolve=3, cclose=1e-6, iprgcg=5)
 ssm = flopy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data)
 # Create the SEAWAT model files
 vdf = flopy.seawat.SeawatVdf(
diff --git a/.docs/Notebooks/swi2package_example3.py b/.docs/Notebooks/swi2package_example3.py
index 4b0486d14f..3063c6d96b 100644
--- a/.docs/Notebooks/swi2package_example3.py
+++ b/.docs/Notebooks/swi2package_example3.py
@@ -130,9 +130,9 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
 # Define SWI2 data.
-zini = np.hstack(
-    (-9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94))
-)[np.newaxis, :]
+zini = np.hstack((-9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94)))[
+    np.newaxis, :
+]
 iso = np.zeros((1, 200), dtype=int)
 iso[:, :30] = -2
@@ -247,9 +247,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
 ax.plot(x[p], zr[p], color=cc[0], linewidth=lw, drawstyle="steps-mid")
 #
 for i in range(5):
-    zt = MergeData(
-        ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
-    )
+    zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
     dr = zt.copy()
     ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
 # Manufacture a legend bar
@@ -278,9 +276,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
 )
 #
 for i in range(4, 10):
-    zt = MergeData(
-        ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
-    )
+    zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
     dr = zt.copy()
     ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
 # Manufacture a legend bar
@@ -308,9 +304,7 @@ def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
     ec=[0.8, 0.8, 0.8],
 )
 #
-zt = MergeData(
-    ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge
-)
+zt = MergeData(ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge)
 ax.plot(
     x,
     zt,
diff --git a/.docs/Notebooks/swi2package_example4.py b/.docs/Notebooks/swi2package_example4.py
index 5ecac1b63c..3aa9c426d1 100644
--- a/.docs/Notebooks/swi2package_example4.py
+++ b/.docs/Notebooks/swi2package_example4.py
@@ -236,9 +236,7 @@
     iswizt=55,
 )
 oc = flopy.modflow.ModflowOc(ml, stress_period_data=spd)
-pcg = flopy.modflow.ModflowPcg(
-    ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
 # -
 # Write the simulation 1 MODFLOW input files and run the model
@@ -293,9 +291,7 @@
     iswizt=55,
 )
 oc = flopy.modflow.ModflowOc(ml2, stress_period_data=spd)
-pcg = flopy.modflow.ModflowPcg(
-    ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
 # -
 # Write the simulation 2 MODFLOW input files and run the model
@@ -306,34 +302,26 @@
 # Load the simulation 1 `ZETA` data and `ZETA` observations.
 # read base model zeta
-zfile = flopy.utils.CellBudgetFile(
-    os.path.join(ml.model_ws, f"{modelname}.zta")
-)
+zfile = flopy.utils.CellBudgetFile(os.path.join(ml.model_ws, f"{modelname}.zta"))
 kstpkper = zfile.get_kstpkper()
 zeta = []
 for kk in kstpkper:
     zeta.append(zfile.get_data(kstpkper=kk, text="ZETASRF 1")[0])
 zeta = np.array(zeta)
 # read swi obs
-zobs = np.genfromtxt(
-    os.path.join(ml.model_ws, f"{modelname}.zobs.out"), names=True
-)
+zobs = np.genfromtxt(os.path.join(ml.model_ws, f"{modelname}.zobs.out"), names=True)

 # Load the simulation 2 `ZETA` data and `ZETA` observations.

 # read saltwater well model zeta
-zfile2 = flopy.utils.CellBudgetFile(
-    os.path.join(ml2.model_ws, f"{modelname2}.zta")
-)
+zfile2 = flopy.utils.CellBudgetFile(os.path.join(ml2.model_ws, f"{modelname2}.zta"))
 kstpkper = zfile2.get_kstpkper()
 zeta2 = []
 for kk in kstpkper:
     zeta2.append(zfile2.get_data(kstpkper=kk, text="ZETASRF 1")[0])
 zeta2 = np.array(zeta2)
 # read swi obs
-zobs2 = np.genfromtxt(
-    os.path.join(ml2.model_ws, f"{modelname2}.zobs.out"), names=True
-)
+zobs2 = np.genfromtxt(os.path.join(ml2.model_ws, f"{modelname2}.zobs.out"), names=True)

 # Create arrays for the x-coordinates and the output years
@@ -609,9 +597,7 @@
 modelxsect = flopy.plot.PlotCrossSection(
     model=ml, line={"Row": 30}, extent=(0, 3050, -50, -10)
 )
-modelxsect.plot_fill_between(
-    zeta[4, :, :, :], colors=colors, ax=ax, edgecolors="none"
-)
+modelxsect.plot_fill_between(zeta[4, :, :, :], colors=colors, ax=ax, edgecolors="none")
 linecollection = modelxsect.plot_grid(ax=ax)
 ax.set_title(f"Recharge year {years[4]}")
diff --git a/.docs/Notebooks/swi2package_example5.py b/.docs/Notebooks/swi2package_example5.py
index 1482352bb6..36e0ca4c50 100644
--- a/.docs/Notebooks/swi2package_example5.py
+++ b/.docs/Notebooks/swi2package_example5.py
@@ -249,9 +249,7 @@
     solver2params=solver2params,
 )
 oc = flopy.modflow.ModflowOc(ml, stress_period_data=ocspd)
-pcg = flopy.modflow.ModflowPcg(
-    ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)

 # Write input files and run the SWI2 model.
@@ -382,9 +380,7 @@
 )
 wel = flopy.modflow.ModflowWel(m, stress_period_data=well_data)
 oc = flopy.modflow.ModflowOc(m, save_every=365, save_types=["save head"])
-pcg = flopy.modflow.ModflowPcg(
-    m, hclose=1.0e-5, rclose=3.0e-3, mxiter=100, iter1=50
-)
+pcg = flopy.modflow.ModflowPcg(m, hclose=1.0e-5, rclose=3.0e-3, mxiter=100, iter1=50)
 # Create the basic MT3DMS model data
 adv = flopy.mt3d.Mt3dAdv(
     m,
diff --git a/.docs/Notebooks/uzf_example.py b/.docs/Notebooks/uzf_example.py
index ab206e0c39..880d55e1df 100644
--- a/.docs/Notebooks/uzf_example.py
+++ b/.docs/Notebooks/uzf_example.py
@@ -134,9 +134,7 @@
 m.nrow_ncol_nlay_nper

 # +
-finf = np.loadtxt(
-    proj_root / "examples" / "data" / "uzf_examples" / "finf.dat"
-)
+finf = np.loadtxt(proj_root / "examples" / "data" / "uzf_examples" / "finf.dat")
 finf = np.reshape(finf, (m.nper, m.nrow, m.ncol))
 finf = {i: finf[i] for i in range(finf.shape[0])}
@@ -160,9 +158,7 @@
 # Define `extwc` (extinction water content) array.
 # +
-extwc = np.loadtxt(
-    proj_root / "examples" / "data" / "uzf_examples" / "extwc.dat"
-)
+extwc = np.loadtxt(proj_root / "examples" / "data" / "uzf_examples" / "extwc.dat")

 fig = plt.figure(figsize=(8, 8))
 ax = fig.add_subplot(1, 1, 1, aspect="equal")
diff --git a/.docs/Notebooks/vtk_pathlines_example.py b/.docs/Notebooks/vtk_pathlines_example.py
index 647c8d56f1..0bb3657e2e 100644
--- a/.docs/Notebooks/vtk_pathlines_example.py
+++ b/.docs/Notebooks/vtk_pathlines_example.py
@@ -101,9 +101,7 @@
 # +
 import numpy as np

-wel_locs = [
-    (rec[0][1], rec[0][2]) for rec in (gwf.wel.stress_period_data.data[0])
-]
+wel_locs = [(rec[0][1], rec[0][2]) for rec in (gwf.wel.stress_period_data.data[0])]
 print(wel_locs)
 # -
diff --git a/.docs/Notebooks/zonebudget_example.py b/.docs/Notebooks/zonebudget_example.py
index ea9ee0bc04..fc57d9bc36 100644
--- a/.docs/Notebooks/zonebudget_example.py
+++ b/.docs/Notebooks/zonebudget_example.py
@@ -219,9 +219,7 @@
 zb.get_budget(names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True)
 # -

-df = zb.get_dataframes(
-    names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True
-)
+df = zb.get_dataframes(names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True)
 df.head(6)
@@ -315,9 +313,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
 for idx, t in enumerate(times):
     ax = fig.add_subplot(1, len(times), idx + 1)
-    zb = flopy.utils.ZoneBudget(
-        cbc_f, zon, kstpkper=None, totim=t, aliases=aliases
-    )
+    zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
     recname = "STORAGE"
     values_in = zb.get_dataframes(names=f"FROM_{recname}").T.squeeze()
@@ -395,9 +391,7 @@ def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
 mt = ml.modeltime

 # budget recarray must be pivoted to get volumetric budget!
-zonbud.get_volumetric_budget(
-    mt, recarray=zonbud.get_budget(net=True, pivot=True)
-)
+zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
 # -

 try:
diff --git a/.docs/create_rstfiles.py b/.docs/create_rstfiles.py
index 6f705b092d..80436df6b9 100644
--- a/.docs/create_rstfiles.py
+++ b/.docs/create_rstfiles.py
@@ -34,11 +34,7 @@ def create_tutorials_rst():
     rst_path = project_root_path / ".docs" / "tutorials.rst"
     nbs_path = project_root_path / ".docs" / "Notebooks"
     filenames = sorted(
-        [
-            path.name
-            for path in nbs_path.rglob("*.py")
-            if "tutorial" in path.name
-        ]
+        [path.name for path in nbs_path.rglob("*.py") if "tutorial" in path.name]
     )

     print(f"Creating {rst_path}")
@@ -77,11 +73,7 @@ def create_examples_rst():
     rst_path = project_root_path / ".docs" / "examples.rst"
     nbs_path = project_root_path / ".docs" / "Notebooks"
     filenames = sorted(
-        [
-            path.name
-            for path in nbs_path.rglob("*.py")
-            if "example" in path.name
-        ]
+        [path.name for path in nbs_path.rglob("*.py") if "example" in path.name]
     )

     print(f"Creating {rst_path}")
diff --git a/.docs/groundwater_paper/scripts/uspb_capture.py b/.docs/groundwater_paper/scripts/uspb_capture.py
index c1b76602fc..4fb25bf5d3 100644
--- a/.docs/groundwater_paper/scripts/uspb_capture.py
+++ b/.docs/groundwater_paper/scripts/uspb_capture.py
@@ -18,9 +18,7 @@ def cf_model(model, k, i, j, base, Q=-100):
     wel.write_file()
     model.run_model(silent=True)
     # get the results
-    hedObj = flopy.utils.HeadFile(
-        os.path.join(cf_pth, "DG.hds"), precision="double"
-    )
+    hedObj = flopy.utils.HeadFile(os.path.join(cf_pth, "DG.hds"), precision="double")
     cbcObj = flopy.utils.CellBudgetFile(
         os.path.join(cf_pth, "DG.cbc"), precision="double"
     )
@@ -32,9 +30,7 @@ def cf_model(model, k, i, j, base, Q=-100):
             v[idx] = np.nan
         else:
             v1 = cbcObj.get_data(kstpkper=kon, text="DRAINS", full3D=True)[0]
-            v2 = cbcObj.get_data(
-                kstpkper=kon, text="STREAM LEAKAGE", full3D=True
-            )[0]
+            v2 = cbcObj.get_data(kstpkper=kon, text="STREAM LEAKAGE", full3D=True)[0]
             v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[0]
             v[idx] = ((v1.sum() + v2.sum() + v3.sum()) - base) / (-Q)
     return v
@@ -58,9 +54,7 @@ def cf_model(model, k, i, j, base, Q=-100):
 ml.run_model()

 # get base model results
-cbcObj = flopy.utils.CellBudgetFile(
-    os.path.join(cf_pth, "DG.cbc"), precision="double"
-)
+cbcObj = flopy.utils.CellBudgetFile(os.path.join(cf_pth, "DG.cbc"), precision="double")
 v1 = cbcObj.get_data(kstpkper=(0, 0), text="DRAINS", full3D=True)[0]
 v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[0]
 v3 = cbcObj.get_data(kstpkper=(0, 0), text="ET", full3D=True)[0]
@@ -103,9 +97,7 @@ def cf_model(model, k, i, j, base, Q=-100):
 # write some summary information
 fs.write(f"Problem size: {nrow} rows and {ncol} columns.\n")
-fs.write(
-    f"Capture fraction analysis performed every {nstep} rows and columns.\n"
-)
+fs.write(f"Capture fraction analysis performed every {nstep} rows and columns.\n")
 fs.write(f"Maximum number of analyses: {nrow2} rows and {ncol2} columns.\n")

 # create array to store capture fraction data (subset of model)
diff --git a/.docs/groundwater_paper/scripts/uspb_capture_par.py b/.docs/groundwater_paper/scripts/uspb_capture_par.py
index 41f666a19e..95de8efb34 100644
--- a/.docs/groundwater_paper/scripts/uspb_capture_par.py
+++ b/.docs/groundwater_paper/scripts/uspb_capture_par.py
@@ -43,9 +43,7 @@ def load_base_model(klay):

 def get_baseQ(model):
-    sys.stdout.write(
-        "\nrunning base model to get base head-dependent flow\n\n"
-    )
+    sys.stdout.write("\nrunning base model to get base head-dependent flow\n\n")
     success, report = model.run_model(silent=True, report=True)
     sys.stdout.write(f"Base model run: {report[-3]}\n")
@@ -54,9 +52,7 @@ def get_baseQ(model):
         os.path.join(model.model_ws, "DG.cbc"), precision=precision
     )
     v1 = cbcObj.get_data(kstpkper=(0, 0), text="DRAINS", full3D=True)[0]
-    v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[
-        0
-    ]
+    v2 = cbcObj.get_data(kstpkper=(0, 0), text="STREAM LEAKAGE", full3D=True)[0]
     v3 = cbcObj.get_data(kstpkper=(0, 0), text="ET", full3D=True)[0]
     return v1.sum() + v2.sum() + v3.sum()
@@ -96,18 +92,14 @@ def copy_files(ml, nproc):
                 (1, 99): ["save head", "save budget", "print budget"],
                 (1, 100): [],
             }
-            oc = flopy.modflow.ModflowOc(
-                ml, stress_period_data=stress_period_data
-            )
+            oc = flopy.modflow.ModflowOc(ml, stress_period_data=stress_period_data)
             # write the input files
             ml.write_input()
         else:
            if not os.path.exists(cf_pths[idx]):
                os.makedirs(cf_pths[idx])
            filelist = [f for f in os.listdir(cf_pths[0])]
-            sys.stdout.write(
-                f"copying files from {cf_pths[0]} to {cf_pths[idx]}\n"
-            )
+            sys.stdout.write(f"copying files from {cf_pths[0]} to {cf_pths[idx]}\n")
            for f in filelist:
                if os.path.splitext(f)[1].lower() in exclude:
                    continue
@@ -196,24 +188,18 @@ def cf_model(imod, ion, nmax, k, i, j, Qt, base, hdry):
                 if h[idx, 1] == hdry:
                     v[idx] = np.nan
                 else:
-                    v1 = cbcObj.get_data(
-                        kstpkper=kon, text="DRAINS", full3D=True
-                    )[0]
+                    v1 = cbcObj.get_data(kstpkper=kon, text="DRAINS", full3D=True)[0]
                     v2 = cbcObj.get_data(
                         kstpkper=kon, text="STREAM LEAKAGE", full3D=True
                     )[0]
-                    v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[
-                        0
-                    ]
+                    v3 = cbcObj.get_data(kstpkper=kon, text="ET", full3D=True)[0]
                     v[idx] = ((v1.sum() + v2.sum() + v3.sum()) - base) / (-Qt)
         except:
             line += f" Error: Model run: {ion + 1} of {nmax} (model number {imod}) - "
             line += "could not process model results.\n"
             v[:] = np.nan
     else:
-        line += (
-            f" Error: Model run: {ion + 1} of {nmax} (model number {imod}) "
-        )
+        line += f" Error: Model run: {ion + 1} of {nmax} (model number {imod}) "
         line += "did not execute successfully\n"
         v[:] = np.nan
     sys.stdout.write(line)
@@ -264,12 +250,8 @@ def doit():
     # write some summary information
     fs.write(f"Problem size: {nrow} rows and {ncol} columns.\n")
-    fs.write(
-        f"Capture fraction analysis performed every {nstep} rows and columns.\n"
-    )
-    fs.write(
-        f"Maximum number of analyses: {nrow2} rows and {ncol2} columns.\n"
-    )
+    fs.write(f"Capture fraction analysis performed every {nstep} rows and columns.\n")
+    fs.write(f"Maximum number of analyses: {nrow2} rows and {ncol2} columns.\n")

     # create array to store capture fraction data (subset of model)
     cf_array = np.empty((10, nrow2, ncol2), dtype=float)
@@ -344,9 +326,7 @@ def doit():
                 res_pth,
                 f"USPB_capture_fraction_{nstep:02d}_{idx + 1:02d}.dat",
             )
-            sys.stdout.write(
-                f"saving capture fraction data to...{os.path.basename(fn)}\n"
-            )
+            sys.stdout.write(f"saving capture fraction data to...{os.path.basename(fn)}\n")
             np.savetxt(fn, cf_array[idx, :, :], delimiter=" ")
diff --git a/.docs/pysrc/tutorial2.py b/.docs/pysrc/tutorial2.py
index b045ff9f7e..4c08fd3853 100644
--- a/.docs/pysrc/tutorial2.py
+++ b/.docs/pysrc/tutorial2.py
@@ -108,9 +108,7 @@
     "print head",
     "print budget",
 ]
-oc = flopy.modflow.ModflowOc(
-    mf, stress_period_data=stress_period_data, compact=True
-)
+oc = flopy.modflow.ModflowOc(mf, stress_period_data=stress_period_data, compact=True)

 # Write the model input files
 mf.write_input()
diff --git a/autotest/conftest.py b/autotest/conftest.py
index b0d0f54bb3..1706e52d10 100644
--- a/autotest/conftest.py
+++ b/autotest/conftest.py
@@ -156,7 +156,5 @@ def pytest_report_header(config):
         if installed:
             lines.append(f"{optional} packages: {', '.join(installed)}")
         if not_found:
-            lines.append(
-                f"{optional} packages not found: {', '.join(not_found)}"
-            )
+            lines.append(f"{optional} packages not found: {', '.join(not_found)}")
     return "\n".join(lines)
diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py
index aad5b34602..f2e62408be 100644
--- a/autotest/regression/test_mf6.py
+++ b/autotest/regression/test_mf6.py
@@ -71,13 +71,9 @@ def test_ts(function_tmpdir, example_data_path):
     )
     # create the Flopy groundwater flow (gwf) model object
     model_nam_file = f"{name}.nam"
-    gwf = flopy.mf6.ModflowGwf(
-        sim, modelname=name, model_nam_file=model_nam_file
-    )
+    gwf = flopy.mf6.ModflowGwf(sim, modelname=name, model_nam_file=model_nam_file)
     # create the flopy iterative model solver (ims) package object
-    ims = flopy.mf6.modflow.mfims.ModflowIms(
-        sim, pname="ims", complexity="SIMPLE"
-    )
+    ims = flopy.mf6.modflow.mfims.ModflowIms(sim, pname="ims", complexity="SIMPLE")
     # create the discretization package
     bot = np.linspace(-3.0, -50.0 / 3.0, 3)
     delrow = delcol = 4.0
@@ -160,9 +156,7 @@ def test_ts(function_tmpdir, example_data_path):
     for layer, cond in zip(range(1, 3), [15.0, 1500.0]):
         for row in range(0, 15):
             if row < 10:
-                ghb_period.append(
-                    ((layer, row, 9), "tides", cond, "Estuary-L2")
-                )
+                ghb_period.append(((layer, row, 9), "tides", cond, "Estuary-L2"))
             else:
                 ghb_period.append(((layer, row, 9), "wl", cond, "Estuary-L2"))
     ghb_spd_ts[0] = ghb_period
@@ -326,9 +320,7 @@ def test_np001(function_tmpdir, example_data_path):
         sim, time_units="DAYS", nper=1, perioddata=[(2.0, 1, 1.0)]
     )
     # specifying the tdis package twice should remove the old tdis package
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
     # first ims file to be replaced
     ims_package = ModflowIms(
         sim,
@@ -364,9 +356,7 @@ def test_np001(function_tmpdir, example_data_path):
         number_orthogonalizations=2,
     )

-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")

     # test case insensitive lookup
     assert sim.get_model(model_name.upper()) is not None
@@ -498,9 +488,7 @@ def test_np001(function_tmpdir, example_data_path):
         stress_period_data=well_spd,
     )
     wel_package.stress_period_data.add_transient_key(1)
-    wel_package.stress_period_data.set_data(
-        {1: {"filename": "wel.txt", "factor": 1.0}}
-    )
+    wel_package.stress_period_data.set_data({1: {"filename": "wel.txt", "factor": 1.0}})

     # test getting data from a binary file
     well_data = wel_package.stress_period_data.get_data(0)
@@ -585,9 +573,7 @@ def test_np001(function_tmpdir, example_data_path):
     # write simulation to new location
     sim.set_all_data_external()
     sim.write_simulation()
-    assert (
-        sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
-    )
+    assert sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
     # test package file with relative path to simulation path
     wel_path = os.path.join(ws, "well_folder", f"{model_name}.wel")
     assert os.path.exists(wel_path)
@@ -636,14 +622,10 @@ def test_np001(function_tmpdir, example_data_path):
     wel_path = os.path.join(ws, md_folder, "well_folder", f"{model_name}.wel")
     assert os.path.exists(wel_path)
     # test data file was recreated by set_all_data_external
-    riv_path = (
-        function_tmpdir / "data" / "np001_mod.riv_stress_period_data_1.txt"
-    )
+    riv_path = function_tmpdir / "data" / "np001_mod.riv_stress_period_data_1.txt"
     assert os.path.exists(riv_path)

-    assert (
-        sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()
-    )
+    assert sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data()

     # run simulation from new path with external files
     sim.run_simulation()
@@ -872,12 +854,8 @@ def test_np002(function_tmpdir, example_data_path):
     assert name.memory_print_option.get_data() is None

     tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="ALL",
@@ -1144,12 +1122,8 @@ def test021_twri(function_tmpdir, example_data_path):
     )
     sim.set_sim_path(function_tmpdir)
     tdis_rc = [(86400.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="SECONDS", nper=1, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="SECONDS", nper=1, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="SUMMARY",
@@ -1170,9 +1144,7 @@ def test021_twri(function_tmpdir, example_data_path):
     fname = "top.bin"
     nrow = 15
     ncol = 15
-    data_folder = os.path.join(
-        sim.simulation_data.mfpath.get_sim_path(), fname
-    )
+    data_folder = os.path.join(sim.simulation_data.mfpath.get_sim_path(), fname)
     f = open(data_folder, "wb")
     header = flopy.utils.BinaryHeader.create(
         bintype="HEAD",
@@ -1353,9 +1325,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
     expected_head_file = expected_output_folder / "AdvGW_tidal.hds"

     # create simulation
-    sim = MFSimulation(
-        sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
     # test tdis package deletion
     tdis_package = ModflowTdis(
         sim, time_units="DAYS", nper=1, perioddata=[(2.0, 2, 1.0)]
     )
@@ -1368,12 +1338,8 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
         (10.0, 120, 1.0),
         (10.0, 120, 1.0),
     ]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=4, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="SUMMARY",
@@ -1427,9 +1393,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
         DataStorageType.internal_constant,
         DataStorageType.internal_array,
     ]
-    ss_template = ModflowGwfsto.ss.empty(
-        model, True, layer_storage_types, 0.000001
-    )
+    ss_template = ModflowGwfsto.ss.empty(model, True, layer_storage_types, 0.000001)
     sto_package = ModflowGwfsto(
         model,
         save_flows=True,
@@ -1489,9 +1453,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
     ts_dict = {
         "filename": os.path.join("well-rates", "well-rates.ts"),
         "timeseries": timeseries,
-        "time_series_namerecord": [
-            ("well_1_rate", "well_2_rate", "well_3_rate")
-        ],
+        "time_series_namerecord": [("well_1_rate", "well_2_rate", "well_3_rate")],
         "interpolation_methodrecord": [("stepwise", "stepwise", "stepwise")],
     }
     # test removing package with child packages
@@ -1571,9 +1533,7 @@ def test005_create_tests_advgw_tidal(function_tmpdir, example_data_path):
     ghb_period_array = []
     for layer, cond in zip(range(1, 3), [15.0, 1500.0]):
         for row in range(0, 15):
-            ghb_period_array.append(
-                ((layer, row, 9), "tides", cond, "Estuary-L2")
-            )
+            ghb_period_array.append(((layer, row, 9), "tides", cond, "Estuary-L2"))
     ghb_period[0] = ghb_period_array

     # build ts ghb
@@ -1989,16 +1949,10 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
     expected_head_file = os.path.join(expected_output_folder, "bcf2ss.hds")

     # create simulation
-    sim = MFSimulation(
-        sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth)
     tdis_rc = [(1.0, 1, 1.0), (1.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="ALL",
@@ -2089,9 +2043,7 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
     riv_period_array = []
     aux_vals = [1.0, 5.0, 4.0, 8.0, 3.0, "bad value", 5.5, 6.3, 8.1, 18.3]
     for row in range(0, 10):
-        riv_period_array.append(
-            ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
-        )
+        riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
     riv_period[0] = riv_period_array
     riv_package = ModflowGwfriv(
         model,
@@ -2102,25 +2054,17 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
     )
     chk = riv_package.check()
     summary = ".".join(chk.summary_array.desc)
-    assert (
-        summary == "Invalid non-numeric value 'bad value' in auxiliary "
-        "data."
-    )
+    assert summary == "Invalid non-numeric value 'bad value' in auxiliary data."
     # test with boundnames
     riv_package.boundnames = True
     riv_period_array = []
     for row in range(0, 10):
-        riv_period_array.append(
-            ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
-        )
+        riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
     riv_period[0] = riv_period_array
     riv_package.stress_period_data = riv_period
     chk = riv_package.check()
     summary = ".".join(chk.summary_array.desc)
-    assert (
-        summary == "Invalid non-numeric value 'bad value' in auxiliary "
-        "data."
-    )
+    assert summary == "Invalid non-numeric value 'bad value' in auxiliary data."
     # fix aux variable
     riv_package.boundnames = False
@@ -2128,9 +2072,7 @@ def test004_create_tests_bcfss(function_tmpdir, example_data_path):
     riv_period_array = []
     aux_vals = [1.0, 5.0, 4.0, 8.0, 3.0, 5.0, 5.5, 6.3, 8.1, 18.3]
     for row in range(0, 10):
-        riv_period_array.append(
-            ((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0)
-        )
+        riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0, aux_vals[row], 10.0))
     riv_period[0] = riv_period_array
     riv_package.stress_period_data = riv_period
     # check again
@@ -2186,21 +2128,13 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
     model_name = "fhb2015"
     pth = example_data_path / "mf6" / "create_tests" / test_ex_name
     expected_output_folder = os.path.join(pth, "expected_output")
-    expected_head_file = os.path.join(
-        expected_output_folder, "fhb2015_fhb.hds"
-    )
+    expected_head_file = os.path.join(expected_output_folder, "fhb2015_fhb.hds")

     # create simulation
-    sim = MFSimulation(
-        sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=pth)
     tdis_rc = [(400.0, 10, 1.0), (200.0, 4, 1.0), (400.0, 6, 1.1)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=3, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=3, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="SUMMARY",
@@ -2230,9 +2164,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
         filename=f"{model_name}.dis",
     )
     ic_package = ModflowGwfic(model, strt=0.0, filename=f"{model_name}.ic")
-    npf_package = ModflowGwfnpf(
-        model, perched=True, icelltype=0, k=20.0, k33=1.0
-    )
+    npf_package = ModflowGwfnpf(model, perched=True, icelltype=0, k=20.0, k33=1.0)
     oc_package = ModflowGwfoc(
         model,
         head_filerecord="fhb2015_fhb.hds",
@@ -2247,9 +2179,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
         model, storagecoefficient=True, iconvert=0, ss=0.01, sy=0.0
     )
     time = model.modeltime
-    assert not (
-        time.steady_state[0] or time.steady_state[1] or time.steady_state[2]
-    )
+    assert not (time.steady_state[0] or time.steady_state[1] or time.steady_state[2])
     wel_period = {0: [((0, 1, 0), "flow")]}
     wel_package = ModflowGwfwel(
         model,
@@ -2272,9 +2202,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path):
         interpolation_methodrecord="linear",
     )

-    chd_period = {
-        0: [((0, 0, 9), "head"), ((0, 1, 9), "head"), ((0, 2, 9), "head")]
-    }
+    chd_period = {0: [((0, 0, 9), "head"), ((0, 1, 9), "head"), ((0, 2, 9), "head")]}
     chd_package = ModflowGwfchd(
         model,
         print_input=True,
@@ -2336,12 +2264,8 @@ def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path):
     )
     sim.set_sim_path(function_tmpdir)
     tdis_rc = [(1.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=1, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="SUMMARY",
@@ -2494,13 +2418,9 @@ def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path):
         0,
         0,
     ]
-    ic_package = ModflowGwfic(
-        model, strt=strt_list, filename=f"{model_name}.ic"
-    )
+    ic_package = ModflowGwfic(model, strt=strt_list, filename=f"{model_name}.ic")
     k = {"filename": "k.bin", "factor": 1.0, "data": 1.0, "binary": "True"}
-    npf_package = ModflowGwfnpf(
-        model, save_flows=True, icelltype=0, k=k, k33=1.0
-    )
+    npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=k, k33=1.0)
     k_data = npf_package.k.get_data()
     assert k_data[0, 0] == 1.0
@@ -2622,13 +2542,9 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path):
     expected_head_file_2 = os.path.join(expected_output_folder, "model2.hds")

     # create simulation
-    sim = MFSimulation(
-        sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
     tdis_rc = [(1.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=1, perioddata=tdis_rc
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
     model_1 = ModflowGwf(
         sim,
         modelname=model_name_1,
@@ -2782,12 +2698,8 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path):
         1.0,
         0.0,
     ]
-    ic_package_1 = ModflowGwfic(
-        model_1, strt=strt_list, filename=f"{model_name_1}.ic"
-    )
-    ic_package_2 = ModflowGwfic(
-        model_2, strt=1.0, filename=f"{model_name_2}.ic"
-    )
+    ic_package_1 = ModflowGwfic(model_1, strt=strt_list, filename=f"{model_name_1}.ic")
+    ic_package_2 = ModflowGwfic(model_2, strt=1.0, filename=f"{model_name_2}.ic")
     npf_package_1 = ModflowGwfnpf(
         model_1, save_flows=True, perched=True, icelltype=0, k=1.0, k33=1.0
     )
@@ -2963,16 +2875,10 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
     expected_head_file = expected_output_folder / "ci.output.hds"

     # create simulation
-    sim = MFSimulation(
-        sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
     tdis_rc = [(1.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=1, perioddata=tdis_rc
-    )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     ims_package = ModflowIms(
         sim,
         print_option="SUMMARY",
@@ -3001,9 +2907,7 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
         filename=f"{model_name}.disv",
     )
     ic_package = ModflowGwfic(model, strt=0.0, filename=f"{model_name}.ic")
-    npf_package = ModflowGwfnpf(
-        model, save_flows=True, icelltype=0, k=10.0, k33=0.2
-    )
+    npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=10.0, k33=0.2)
     oc_package = ModflowGwfoc(
         model,
         budget_filerecord="ci.output.cbc",
@@ -3012,9 +2916,7 @@ def test050_create_tests_circle_island(function_tmpdir, example_data_path):
         printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
     )

-    stress_period_data = testutils.read_ghbrecarray(
-        os.path.join(pth, "ghb.txt"), 2
-    )
+    stress_period_data = testutils.read_ghbrecarray(os.path.join(pth, "ghb.txt"), 2)
     ghb_package = ModflowGwfghb(
         model, maxbound=3173, stress_period_data=stress_period_data
     )
@@ -3064,9 +2966,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
     expected_head_file = expected_output_folder / "test1tr.hds"

     # create simulation
-    sim = MFSimulation(
-        sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
     sim.name_file.continue_.set_data(True)
     tdis_rc = [(1577889000, 50, 1.1), (1577889000, 50, 1.1)]
     tdis_package = ModflowTdis(
@@ -3076,9 +2976,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
         perioddata=tdis_rc,
         filename="simulation.tdis",
     )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     model.name_file.save_flows.set_data(True)
     ims_package = ModflowIms(
         sim,
@@ -3122,9 +3020,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
     )
     strt = testutils.read_std_array(os.path.join(pth, "strt.txt"), "float")
     strt_int = ["internal", "factor", 1.0, "iprn", 0, strt]
-    ic_package = ModflowGwfic(
-        model, strt=strt_int, filename=f"{model_name}.ic"
-    )
+    ic_package = ModflowGwfic(model, strt=strt_int, filename=f"{model_name}.ic")

     k_vals = testutils.read_std_array(os.path.join(pth, "k.txt"), "float")
     k = ["internal", "factor", 3.000e-03, "iprn", 0, k_vals]
@@ -3136,9 +3032,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
         budget_filerecord="test1tr.cbc",
         head_filerecord="test1tr.hds",
         saverecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
-        printrecord={
-            0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]
-        },
+        printrecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
     )

     sy_vals = testutils.read_std_array(os.path.join(pth, "sy.txt"), "float")
@@ -3177,9 +3071,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
         filename="test028_sfr.evt.obs", print_input=True, continuous=obs_dict
     )

-    stress_period_data = {
-        0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]
-    }
+    stress_period_data = {0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]}
     ghb_package = ModflowGwfghb(
         model, maxbound=2, stress_period_data=stress_period_data
     )
@@ -3319,9 +3211,7 @@ def test028_create_tests_sfr(function_tmpdir, example_data_path):
     # test hpc package
     part = [("model1", 1), ("model2", 2)]
-    hpc = ModflowUtlhpc(
-        sim, dev_log_mpi=True, partitions=part, filename="test.hpc"
-    )
+    hpc = ModflowUtlhpc(sim, dev_log_mpi=True, partitions=part, filename="test.hpc")
     assert sim.hpc.dev_log_mpi.get_data()
     assert hpc.filename == "test.hpc"
@@ -3479,9 +3369,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
     ic = ModflowGwfic(gwf, strt=strt)

     # node property flow
-    npf = ModflowGwfnpf(
-        gwf, save_flows=False, icelltype=laytyp[idx], k=hk, k33=hk
-    )
+    npf = ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp[idx], k=hk, k33=hk)
     # storage
     sto = ModflowGwfsto(
         gwf,
@@ -3573,9 +3461,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
         gwt,
         budget_filerecord=f"{gwtname}.cbc",
         concentration_filerecord=f"{gwtname}.ucn",
-        concentrationprintrecord=[
-            ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
-        ],
+        concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
         saverecord=[("CONCENTRATION", "ALL")],
         printrecord=[("CONCENTRATION", "ALL"), ("BUDGET", "ALL")],
     )
@@ -3639,18 +3525,10 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
     pth = example_data_path / "mf6" / test_ex_name
     expected_output_folder = os.path.join(pth, "expected_output")
-    expected_head_file_a = os.path.join(
-        expected_output_folder, "flow15_flow_unch.hds"
-    )
-    expected_head_file_b = os.path.join(
-        expected_output_folder, "flow15_flow_adj.hds"
-    )
-    expected_cbc_file_a = os.path.join(
-        expected_output_folder, "flow15_flow_unch.cbc"
-    )
-    expected_cbc_file_b = os.path.join(
-        expected_output_folder, "flow15_flow_adj.cbc"
-    )
+    expected_head_file_a = os.path.join(expected_output_folder, "flow15_flow_unch.hds")
+    expected_head_file_b = os.path.join(expected_output_folder, "flow15_flow_adj.hds")
+    expected_cbc_file_a = os.path.join(expected_output_folder, "flow15_flow_unch.cbc")
+    expected_cbc_file_b = os.path.join(expected_output_folder, "flow15_flow_adj.cbc")

     array_util = PyListUtil()
@@ -3693,13 +3571,9 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
     # compare output to expected results
     head_new = function_tmpdir / "flow15_flow.hds"
-    assert compare_heads(
-        None, None, files1=[expected_head_file_a], files2=[head_new]
-    )
+    assert compare_heads(None, None, files1=[expected_head_file_a], files2=[head_new])

-    budget_frf = sim.simulation_data.mfdata[
-        (model_name, "CBC", "FLOW-JA-FACE")
-    ]
+    budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     assert array_util.array_comp(budget_frf_valid, budget_frf)

     # change some settings
@@ -3747,13 +3621,9 @@ def test001a_tharmonic(function_tmpdir, example_data_path):
     # compare output to expected results
     head_new = os.path.join(save_folder, "flow15_flow.hds")
-    assert compare_heads(
-        None, None, files1=[expected_head_file_b], files2=[head_new]
-    )
+    assert compare_heads(None, None, files1=[expected_head_file_b], files2=[head_new])

-    budget_frf = sim.simulation_data.mfdata[
-        (model_name, "CBC", "FLOW-JA-FACE")
-    ]
+    budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     assert array_util.array_comp(budget_frf_valid, budget_frf)
@@ -3773,9 +3643,7 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
     array_util = PyListUtil()

     # load simulation
-    sim = MFSimulation.load(
-        model_name, "mf6", "mf6", data_folder, verify_data=True
-    )
+    sim = MFSimulation.load(model_name, "mf6", "mf6", data_folder, verify_data=True)

     # make temp folder to save simulation
     sim.set_sim_path(function_tmpdir)
@@ -3795,13 +3663,9 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
     )

     head_new = os.path.join(function_tmpdir, "model.hds")
-    assert compare_heads(
-        None, None, files1=[expected_head_file_a], files2=[head_new]
-    )
+    assert compare_heads(None, None, files1=[expected_head_file_a], files2=[head_new])

-    budget_frf = sim.simulation_data.mfdata[
-        (model_name, "CBC", "FLOW-JA-FACE")
-    ]
+    budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     assert array_util.array_comp(budget_fjf_valid, budget_frf)

     model = sim.get_model(model_name)
@@ -3831,19 +3695,13 @@ def test003_gwfs_disv(function_tmpdir, example_data_path):
     # get expected results
     budget_obj = CellBudgetFile(expected_cbc_file_b, precision="double")
-    budget_fjf_valid = np.array(
-        budget_obj.get_data(text="FLOW JA FACE", full3D=True)
-    )
+    budget_fjf_valid = np.array(budget_obj.get_data(text="FLOW JA FACE", full3D=True))

     # compare output to expected results
     head_new = os.path.join(save_folder, "model.hds")
-    assert compare_heads(
-        None, None, files1=[expected_head_file_b], files2=[head_new]
-    )
+    assert compare_heads(None, None, files1=[expected_head_file_b], files2=[head_new])

-    budget_frf = sim.simulation_data.mfdata[
-        (model_name, "CBC", "FLOW-JA-FACE")
-    ]
+    budget_frf = sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     assert array_util.array_comp(budget_fjf_valid, budget_frf)
@@ -3856,12 +3714,8 @@ def test005_advgw_tidal(function_tmpdir, example_data_path):
     model_name = "gwf_1"
     pth = example_data_path / "mf6" / test_ex_name
     expected_output_folder = os.path.join(pth, "expected_output")
-    expected_head_file_a = os.path.join(
-        expected_output_folder, "AdvGW_tidal_unch.hds"
-    )
-    expected_head_file_b = os.path.join(
-        expected_output_folder, "AdvGW_tidal_adj.hds"
-    )
+    expected_head_file_a = os.path.join(expected_output_folder, "AdvGW_tidal_unch.hds")
+    expected_head_file_b = os.path.join(expected_output_folder, "AdvGW_tidal_adj.hds")

     # load simulation
     sim = MFSimulation.load(
@@ -3939,13 +3793,9 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
     expected_head_file_2 = os.path.join(expected_output_folder, "model2.hds")

     # create simulation
-    sim = MFSimulation(
-        sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth
-    )
+    sim = MFSimulation(sim_name=test_ex_name, version="mf6", exe_name="mf6", sim_ws=pth)
     tdis_rc = [(1.0, 1, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=1, perioddata=tdis_rc
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
     model_1 = ModflowGwf(
         sim,
         modelname=model_name_1,
@@ -3999,12 +3849,8 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
         cell2d=c2drecarray,
         filename=f"{model_name_2}.disv",
     )
-    ic_package_1 = ModflowGwfic(
-        model_1, strt=1.0, filename=f"{model_name_1}.ic"
-    )
-    ic_package_2 = ModflowGwfic(
-        model_2, strt=1.0, filename=f"{model_name_2}.ic"
-    )
+    ic_package_1 = ModflowGwfic(model_1, strt=1.0, filename=f"{model_name_1}.ic")
+    ic_package_2 = ModflowGwfic(model_2, strt=1.0, filename=f"{model_name_2}.ic")
     npf_package_1 = ModflowGwfnpf(
         model_1, save_flows=True, perched=True, icelltype=0, k=1.0, k33=1.0
     )
@@ -4042,9 +3888,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
         maxbound=30,
         stress_period_data=stress_period_data,
     )
-    exgrecarray = testutils.read_exchangedata(
-        os.path.join(pth, "exg.txt"), 3, 2
-    )
+    exgrecarray = testutils.read_exchangedata(os.path.join(pth, "exg.txt"), 3, 2)
     exg_data = {
         "filename": "exg_data.bin",
         "data": exgrecarray,
@@ -4074,9 +3918,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path):
     )

     gnc_path = os.path.join("gnc", "test006_2models_gnc.gnc")
-    gncrecarray = testutils.read_gncrecarray(
-        os.path.join(pth, "gnc.txt"), 3, 2
-    )
+    gncrecarray = testutils.read_gncrecarray(os.path.join(pth, "gnc.txt"), 3, 2)
     gnc_package = exg_package.gnc.initialize(
         filename=gnc_path,
         print_input=True,
@@ -4191,9 +4033,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
     budget_fjf = np.array(
         sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     )
-    assert array_util.array_comp(
-        np.array(budget_fjf_valid), np.array(budget_fjf)
-    )
+    assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))

     # change some settings
     model = sim.get_model(model_name)
@@ -4238,9 +4078,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
     budget_fjf = np.array(
         sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     )
-    assert array_util.array_comp(
-        np.array(budget_fjf_valid), np.array(budget_fjf)
-    )
+    assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))

     # confirm that files did move
     save_folder = function_tmpdir / "save02"
@@ -4286,9 +4124,7 @@ def test006_gwf3(function_tmpdir, example_data_path):
     budget_fjf = np.array(
         sim.simulation_data.mfdata[(model_name, "CBC", "FLOW-JA-FACE")]
     )
-    assert array_util.array_comp(
-        np.array(budget_fjf_valid), np.array(budget_fjf)
-    )
+    assert array_util.array_comp(np.array(budget_fjf_valid), np.array(budget_fjf))

     # confirm that files did not move
     assert not os.path.isfile(os.path.join(save_folder, "flow.disu.ja.dat"))
@@ -4311,12 +4147,8 @@ def test045_lake1ss_table(function_tmpdir, example_data_path):
     model_name = "lakeex1b"
     pth = example_data_path / "mf6" / test_ex_name
     expected_output_folder = os.path.join(pth, "expected_output")
-    expected_head_file_a = os.path.join(
-        expected_output_folder, "lakeex1b_unch.hds"
-    )
-    expected_head_file_b = os.path.join(
-        expected_output_folder, "lakeex1b_adj.hds"
-    )
+    expected_head_file_a = os.path.join(expected_output_folder, "lakeex1b_unch.hds")
+    expected_head_file_b = os.path.join(expected_output_folder, "lakeex1b_adj.hds")

     # load simulation
     sim = MFSimulation.load(
@@ -4427,9 +4259,7 @@ def test006_2models_mvr(function_tmpdir, example_data_path):
     expected_head_file_bb = expected_output_folder / "model2_adj.hds"

     # load simulation
-    sim = MFSimulation.load(
-        sim_name, "mf6", "mf6", data_folder, verify_data=True
-    )
+    sim = MFSimulation.load(sim_name, "mf6", "mf6", data_folder, verify_data=True)

     # make temp folder to save simulation
     sim.set_sim_path(ws)
@@ -4645,19 +4475,15 @@ def test001e_uzf_3lay(function_tmpdir, example_data_path):
         ["ic6", "ims", "obs6", "oc6"],
     ]
     for load_only in load_only_lists:
-        sim = MFSimulation.load(
-            model_name, "mf6", "mf6", pth, load_only=load_only
-        )
+        sim = MFSimulation.load(model_name, "mf6", "mf6", pth, load_only=load_only)
         sim.set_sim_path(function_tmpdir)
         model = sim.get_model()
         for package in model_package_check:
-            assert (
-                model.get_package(package, type_only=True) is not None
-            ) == (package in load_only or f"{package}6" in load_only)
+            assert (model.get_package(package, type_only=True) is not None) == (
+                package in load_only or f"{package}6" in load_only
+            )
     # test running a runnable load_only case
-    sim = MFSimulation.load(
-        model_name, "mf6", "mf6", pth, load_only=load_only_lists[0]
-    )
+    sim = MFSimulation.load(model_name, "mf6", "mf6", pth, load_only=load_only_lists[0])
     sim.set_sim_path(function_tmpdir)
     success, buff = sim.run_simulation()
     assert success, f"simulation {sim.name} from load did not run"
@@ -4811,9 +4637,7 @@ def test036_twrihfb(function_tmpdir, example_data_path):
     cond_data[0][index][2] = 2.1
     cond.set_data(cond_data[0], 0)

-    rch = sim.simulation_data.mfdata[
-        (model_name, "rcha", "period", "recharge")
-    ]
+    rch = sim.simulation_data.mfdata[(model_name, "rcha", "period", "recharge")]
     rch_data = rch.get_data()
     assert rch_data[0][5, 1] == 0.00000003
@@ -4864,9 +4688,7 @@ def test027_timeseriestest(function_tmpdir, example_data_path):
     sim.write_simulation()

     # reload sim
-    sim = MFSimulation.load(
-        model_name, "mf6", "mf6", function_tmpdir, verify_data=True
-    )
+    sim = MFSimulation.load(model_name, "mf6", "mf6", function_tmpdir, verify_data=True)
     sim.write_simulation()

     # run simulation
@@ -4937,9 +4759,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
         perioddata=tdis_rc,
         filename="simulation.tdis",
     )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     model.name_file.save_flows.set_data(True)
     ims_package = ModflowIms(
         sim,
@@ -4983,9 +4803,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
     )
     strt = np.ones((15, 10), float) * 50.0
     strt_int = {"filename": "strt.txt", "factor": 0.8, "iprn": 0, "data": strt}
-    ic_package = ModflowGwfic(
-        model, strt=strt_int, filename=f"{model_name}.ic"
-    )
+    ic_package = ModflowGwfic(model, strt=strt_int, filename=f"{model_name}.ic")

     k_vals = np.ones((15, 10), float) * 10.0
     assert k_vals[0, 0] == 10.0
@@ -4998,9 +4816,7 @@ def test099_create_tests_int_ext(function_tmpdir, example_data_path):
         budget_filerecord="test1tr.cbc",
         head_filerecord="test1tr.hds",
         saverecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
-        printrecord={
-            0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]
-        },
+        printrecord={0: [("HEAD", "FREQUENCY", 5), ("BUDGET", "FREQUENCY", 5)]},
     )

     sy_vals = np.ones((15, 10), float) * 0.1
diff --git a/autotest/regression/test_mf6_pandas.py b/autotest/regression/test_mf6_pandas.py
index 7ef875ada4..703ea34e69 100644
--- a/autotest/regression/test_mf6_pandas.py
+++ b/autotest/regression/test_mf6_pandas.py
@@ -81,9 +81,7 @@ def test_pandas_001(function_tmpdir, example_data_path):
     assert sim.simulation_data.use_pandas

     tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
-    tdis_package = ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
+    tdis_package = ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)
     # replace with real ims file
     ims_package = ModflowIms(
         sim,
@@ -101,9 +99,7 @@ def test_pandas_001(function_tmpdir, example_data_path):
         preconditioner_drop_tolerance=0.01,
         number_orthogonalizations=2,
     )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
     top = {"filename": "top.txt", "data": 100.0}
     botm = {"filename": "botm.txt", "data": 50.0}
     dis_package = ModflowGwfdis(
diff --git a/autotest/regression/test_modflow.py b/autotest/regression/test_modflow.py
index 792cef2f76..2b15a93995 100644
--- a/autotest/regression/test_modflow.py
+++ b/autotest/regression/test_modflow.py
@@ -74,9 +74,7 @@ def test_uzf_unit_numbers(function_tmpdir, uzf_example_path):
     # compare budget terms
     fsum = join(function_tmpdir, f"{splitext(mfnam)[0]}.budget.out")
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -92,10 +90,12 @@ def test_unitnums(function_tmpdir, mf2005_test_path):
     assert m.load_fail is False, "failed to load all packages"

     v = (m.nlay, m.nrow, m.ncol, m.nper)
-    assert v == (1, 7, 100, 50), (
-        "modflow-2005 testsfr2_tab does not have "
-        "1 layer, 7 rows, and 100 columns"
-    )
+    assert v == (
+        1,
+        7,
+        100,
+        50,
+    ), "modflow-2005 testsfr2_tab does not have 1 layer, 7 rows, and 100 columns"

     success, buff = m.run_model(silent=False)
     assert success, "base model run did not terminate successfully"
@@ -112,9 +112,7 @@ def test_unitnums(function_tmpdir, mf2005_test_path):
     fn1 = join(model_ws2, mfnam)

     fsum = join(ws, f"{splitext(mfnam)[0]}.budget.out")
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -131,9 +129,7 @@ def test_gage(function_tmpdir, example_data_path):
     copytree(pth, ws)

     # load the modflow model
-    mf = Modflow.load(
-        "testsfr2_tab.nam", verbose=True, model_ws=ws, exe_name="mf2005"
-    )
+    mf = Modflow.load("testsfr2_tab.nam", verbose=True, model_ws=ws, exe_name="mf2005")

     # run the modflow-2005 model
     success, buff = mf.run_model()
@@ -168,10 +164,7 @@ def test_gage(function_tmpdir, example_data_path):
 @pytest.mark.regression
 @pytest.mark.parametrize(
     "namfile",
-    [
-        __example_data_path / "pcgn_test" / nf
-        for nf in ["twri.nam", "MNW2.nam"]
-    ],
+    [__example_data_path / "pcgn_test" / nf for nf in ["twri.nam", "MNW2.nam"]],
 )
 def test_mf2005pcgn(function_tmpdir, namfile):
     ws = function_tmpdir / "ws"
@@ -208,9 +201,7 @@ def test_mf2005pcgn(function_tmpdir, namfile):
     assert success, "head comparison failure"

     fsum = function_tmpdir / f"{Path(namfile).stem}.budget.out"
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -250,9 +241,7 @@ def test_mf2005gmg(function_tmpdir, namfile):
     assert success, "head comparison failure"

     fsum = function_tmpdir / f"{Path(namfile).stem}.budget.out"
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -318,9 +307,7 @@ def test_mf2005(function_tmpdir, namfile):
     # compare budgets
     fsum = ws / f"{Path(namfile).stem}.budget.out"
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -344,9 +331,7 @@ def test_mf2005fhb(function_tmpdir, namfile):
     ws = function_tmpdir / "ws"
     copytree(Path(namfile).parent, ws)

-    m = Modflow.load(
-        Path(namfile).name, model_ws=ws, verbose=True, exe_name="mf2005"
-    )
+    m = Modflow.load(Path(namfile).name, model_ws=ws, verbose=True, exe_name="mf2005")
     assert m.load_fail is False

     success, buff = m.run_model(silent=False)
@@ -366,9 +351,7 @@ def test_mf2005fhb(function_tmpdir, namfile):
     assert success, "head comparison failure"

     fsum = join(ws, f"{Path(namfile).stem}.budget.out")
-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
@@ -410,7 +393,5 @@ def test_mf2005_lake(function_tmpdir, namfile, mf2005_test_path):
     fsum = join(ws, f"{Path(namfile).stem}.budget.out")

-    success = compare_budget(
-        fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
-    )
+    success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum)
     assert success, "budget comparison failure"
diff --git a/autotest/regression/test_str.py b/autotest/regression/test_str.py
index 9f9aab3c6c..50756aef10 100644
--- a/autotest/regression/test_str.py
+++ b/autotest/regression/test_str.py
@@ -88,9 +88,7 @@ def test_str_fixed_free(function_tmpdir, example_data_path):
     except:
         m2 = None

-    assert (
-        m2 is not None
-    ), "could not load the fixed format model with aux variables"
+    assert m2 is not None, "could not load the fixed format model with aux variables"

     for p in function_tmpdir.glob("*"):
         p.unlink()
@@ -114,9 +112,7 @@ def test_str_fixed_free(function_tmpdir, example_data_path):
     except:
         m2 = None

-    assert (
-        m2 is not None
-    ), "could not load the free format model with aux variables"
+    assert m2 is not None, "could not load the free format model with aux variables"

     # compare the fixed and free format head files
     fn1 = function_tmpdir / "str.nam"
diff --git a/autotest/regression/test_swi2.py b/autotest/regression/test_swi2.py
index 86dc3d93b8..b0043b3023 100644
--- a/autotest/regression/test_swi2.py
+++ b/autotest/regression/test_swi2.py
@@ -16,9 +16,7 @@ def swi_path(example_data_path):
@requires_exe("mf2005") @pytest.mark.slow @pytest.mark.regression -@pytest.mark.parametrize( - "namfile", ["swiex1.nam", "swiex2_strat.nam", "swiex3.nam"] -) +@pytest.mark.parametrize("namfile", ["swiex1.nam", "swiex2_strat.nam", "swiex3.nam"]) def test_mf2005swi2(function_tmpdir, swi_path, namfile): name = namfile.replace(".nam", "") ws = function_tmpdir / "ws" @@ -47,8 +45,6 @@ def test_mf2005swi2(function_tmpdir, swi_path, namfile): fn1 = os.path.join(model_ws2, namfile) fsum = os.path.join(ws, f"{os.path.splitext(namfile)[0]}.budget.out") - success = compare_budget( - fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum - ) + success = compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum) assert success, "budget comparison failure" diff --git a/autotest/regression/test_wel.py b/autotest/regression/test_wel.py index 0c4a6cc0fd..110bb4148e 100644 --- a/autotest/regression/test_wel.py +++ b/autotest/regression/test_wel.py @@ -84,9 +84,7 @@ def test_binary_well(function_tmpdir): m.remove_package("WEL") # recreate well package with binary output - wel = ModflowWel( - m, stress_period_data=wel_data, binary=True, dtype=wd.dtype - ) + wel = ModflowWel(m, stress_period_data=wel_data, binary=True, dtype=wd.dtype) # write the model to the new path m.write_input() @@ -97,14 +95,10 @@ def test_binary_well(function_tmpdir): fn1 = os.path.join(pth, f"{mfnam}.nam") # compare the files - fsum = os.path.join( - function_tmpdir, f"{os.path.splitext(mfnam)[0]}.head.out" - ) + fsum = os.path.join(function_tmpdir, f"{os.path.splitext(mfnam)[0]}.head.out") assert compare_heads(fn0, fn1, outfile=fsum), "head comparison failure" - fsum = os.path.join( - function_tmpdir, f"{os.path.splitext(mfnam)[0]}.budget.out" - ) + fsum = os.path.join(function_tmpdir, f"{os.path.splitext(mfnam)[0]}.budget.out") assert compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ), "budget comparison failure" diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index 09351f4bfd..b61bd94f72 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -122,9 +122,7 @@ def test_headfile_build_index(example_data_path): ) # check first and last recorddict list_recordarray = hds.recordarray.tolist() - assert list_recordarray[0] == ( - (1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1) - ) + assert list_recordarray[0] == ((1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1)) assert list_recordarray[-1] == ( (1, 1097, 1.0, 1097.0, b" HEAD", 20, 40, 3) ) @@ -179,12 +177,8 @@ def test_concentration_build_index(example_data_path): ) # check first and last recorddict list_recordarray = ucn.recordarray.tolist() - assert list_recordarray[0] == ( - (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1) - ) - assert list_recordarray[-1] == ( - (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8) - ) + assert list_recordarray[0] == ((29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1)) + assert list_recordarray[-1] == ((29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8)) assert ucn.times == [np.float32(100.0)] assert ucn.kstpkper == [(1, 1)] np.testing.assert_array_equal(ucn.iposarray, np.arange(8) * 1304 + 44) @@ -212,9 +206,7 @@ def test_concentration_build_index(example_data_path): def test_binaryfile_writeread(function_tmpdir, nwt_model_path): model = "Pr3_MFNWT_lower.nam" - ml = flopy.modflow.Modflow.load( - model, version="mfnwt", model_ws=nwt_model_path - ) + ml = flopy.modflow.Modflow.load(model, version="mfnwt", model_ws=nwt_model_path) # change the model work space ml.change_model_ws(function_tmpdir) # @@ -442,9 +434,7 @@ 
def test_binaryfile_read(function_tmpdir, freyberg_model_path): assert np.array_equal( h0, h1 ), "binary head read using totim != head read using kstpkper" - assert np.array_equal( - h0, h2 - ), "binary head read using totim != head read using idx" + assert np.array_equal(h0, h2), "binary head read using totim != head read using idx" ts = h.get_ts((0, 7, 5)) expected = 26.00697135925293 @@ -478,9 +468,7 @@ def test_binaryfile_read_context(freyberg_model_path): def test_binaryfile_reverse_mf6_dis(function_tmpdir): name = "reverse_dis" - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] nper = len(tdis_rc) tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) @@ -523,20 +511,14 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir): # reverse budget and write to separate file budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" - budget_file = flopy.utils.CellBudgetFile( - function_tmpdir / budget_file, tdis=tdis - ) + budget_file = flopy.utils.CellBudgetFile(function_tmpdir / budget_file, tdis=tdis) budget_file.reverse(budget_file_rev_path) - budget_file_rev = flopy.utils.CellBudgetFile( - budget_file_rev_path, tdis=tdis - ) + budget_file_rev = flopy.utils.CellBudgetFile(budget_file_rev_path, tdis=tdis) for kper in range(nper): assert np.allclose(heads[kper], heads_rev[-kper + 1]) budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] - budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ - 0 - ] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[0] assert budget.shape == budget_rev.shape assert np.allclose(budget, -budget_rev) @@ -544,9 +526,7 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir): @requires_pkg("shapely") def test_binaryfile_reverse_mf6_disv(function_tmpdir): name = "reverse_disv" - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] nper = len(tdis_rc) tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) @@ -583,20 +563,14 @@ def test_binaryfile_reverse_mf6_disv(function_tmpdir): # reverse budget and write to separate file budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" - budget_file = flopy.utils.CellBudgetFile( - function_tmpdir / budget_file, tdis=tdis - ) + budget_file = flopy.utils.CellBudgetFile(function_tmpdir / budget_file, tdis=tdis) budget_file.reverse(budget_file_rev_path) - budget_file_rev = flopy.utils.CellBudgetFile( - budget_file_rev_path, tdis=tdis - ) + budget_file_rev = flopy.utils.CellBudgetFile(budget_file_rev_path, tdis=tdis) for kper in range(nper): assert np.allclose(heads[kper], heads_rev[-kper + 1]) budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] - budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ - 0 - ] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[0] assert budget.shape == budget_rev.shape assert np.allclose(budget, -budget_rev) @@ -609,9 +583,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir): ) tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] nper = len(tdis_rc) - tdis = flopy.mf6.ModflowTdis( - sim, time_units="DAYS", nper=nper, perioddata=tdis_rc - ) + tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) 
sim.set_sim_path(function_tmpdir) sim.write_simulation() sim.run_simulation() @@ -665,9 +637,7 @@ def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir): assert np.array_equal(f_data[0][0], rf_data[0][0]) budget = budget_file.get_data(text="FLOW-JA-FACE", totim=idx)[0] - budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[ - 0 - ] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[0] assert budget.shape == budget_rev.shape assert np.allclose(budget, -budget_rev) diff --git a/autotest/test_binarygrid_util.py b/autotest/test_binarygrid_util.py index 4c79aaf7d8..4750c3fef3 100644 --- a/autotest/test_binarygrid_util.py +++ b/autotest/test_binarygrid_util.py @@ -43,27 +43,20 @@ def test_mfgrddis_modelgrid(mfgrd_test_path): plt.close() extents = modelgrid.extent - errmsg = ( - f"extents {extents} of {fn} does not equal (0.0, 8000.0, 0.0, 8000.0)" - ) + errmsg = f"extents {extents} of {fn} does not equal (0.0, 8000.0, 0.0, 8000.0)" assert extents == (0.0, 8000.0, 0.0, 8000.0), errmsg ncpl = modelgrid.ncol * modelgrid.nrow - assert ( - modelgrid.ncpl == ncpl - ), f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}" + assert modelgrid.ncpl == ncpl, f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}" nvert = modelgrid.nvert iverts = modelgrid.iverts maxvertex = max([max(sublist[1:]) for sublist in iverts]) - assert ( - maxvertex + 1 == nvert - ), f"nvert ({maxvertex + 1}) does not equal {nvert}" + assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}" verts = modelgrid.verts - assert nvert == verts.shape[0], ( - f"number of vertex (x, y) pairs ({verts.shape[0]}) " - f"does not equal {nvert}" - ) + assert ( + nvert == verts.shape[0] + ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}" def test_mfgrddisv_MfGrdFile(mfgrd_test_path): @@ -107,19 +100,14 @@ def test_mfgrddisv_modelgrid(mfgrd_test_path): nvert = mg.nvert iverts = mg.iverts maxvertex = max([max(sublist[1:]) for sublist in iverts]) - assert ( - maxvertex + 1 == nvert - ), f"nvert ({maxvertex + 1}) does not equal {nvert}" + assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}" verts = mg.verts - assert nvert == verts.shape[0], ( - f"number of vertex (x, y) pairs ({verts.shape[0]}) " - f"does not equal {nvert}" - ) + assert ( + nvert == verts.shape[0] + ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}" cellxy = np.column_stack(mg.xyzcellcenters[:2]) - errmsg = ( - f"shape of flow.disv centroids {cellxy.shape} not equal to (218, 2)." - ) + errmsg = f"shape of flow.disv centroids {cellxy.shape} not equal to (218, 2)." 
assert cellxy.shape == (218, 2), errmsg @@ -166,11 +154,8 @@ def test_mfgrddisu_modelgrid(mfgrd_test_path): nvert = mg.nvert iverts = mg.iverts maxvertex = max([max(sublist[1:]) for sublist in iverts]) - assert ( - maxvertex + 1 == nvert - ), f"nvert ({maxvertex + 1}) does not equal {nvert}" + assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}" verts = mg.verts - assert nvert == verts.shape[0], ( - f"number of vertex (x, y) pairs ({verts.shape[0]}) " - f"does not equal {nvert}" - ) + assert ( + nvert == verts.shape[0] + ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}" diff --git a/autotest/test_cbc_full3D.py b/autotest/test_cbc_full3D.py index 54bad1064e..d714b777d8 100644 --- a/autotest/test_cbc_full3D.py +++ b/autotest/test_cbc_full3D.py @@ -73,14 +73,14 @@ def load_mf6(path, ws_out): def cbc_eval_size(cbcobj, nnodes, shape3d): cbc_pth = cbcobj.filename - assert cbcobj.nnodes == nnodes, ( - f"{cbc_pth} nnodes ({cbcobj.nnodes}) " f"does not equal {nnodes}" - ) + assert ( + cbcobj.nnodes == nnodes + ), f"{cbc_pth} nnodes ({cbcobj.nnodes}) does not equal {nnodes}" a = np.squeeze(np.ones(cbcobj.shape, dtype=float)) b = np.squeeze(np.ones(shape3d, dtype=float)) - assert a.shape == b.shape, ( - f"{cbc_pth} shape {cbcobj.shape} " f"does not conform to {shape3d}" - ) + assert ( + a.shape == b.shape + ), f"{cbc_pth} shape {cbcobj.shape} does not conform to {shape3d}" def cbc_eval_data(cbcobj, shape3d): @@ -92,9 +92,7 @@ def cbc_eval_data(cbcobj, shape3d): times = cbcobj.get_times() for name in names: text = name.strip() - arr = np.squeeze( - cbcobj.get_data(text=text, totim=times[0], full3D=True)[0] - ) + arr = np.squeeze(cbcobj.get_data(text=text, totim=times[0], full3D=True)[0]) if text != "FLOW-JA-FACE": b = np.squeeze(np.ones(shape3d, dtype=float)) assert arr.shape == b.shape, ( diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py index ebcaf15a94..3eb64447e7 100644 --- a/autotest/test_cellbudgetfile.py +++ b/autotest/test_cellbudgetfile.py @@ -484,15 +484,13 @@ def test_cellbudgetfile_readrecord(example_data_path): with pytest.raises(TypeError) as e: v.get_data() - assert str(e.value).startswith( - "get_data() missing 1 required argument" - ), str(e.exception) + assert str(e.value).startswith("get_data() missing 1 required argument"), str( + e.exception + ) t = v.get_data(text="STREAM LEAKAGE") assert len(t) == 30, "length of stream leakage data != 30" - assert ( - t[0].shape[0] == 36 - ), "sfr budget data does not have 36 reach entries" + assert t[0].shape[0] == 36, "sfr budget data does not have 36 reach entries" t = v.get_data(text="STREAM LEAKAGE", full3D=True) assert t[0].shape == (1, 15, 10), ( @@ -597,9 +595,7 @@ def test_cellbudgetfile_reverse_mf2005(example_data_path, function_tmpdir): sim_name = "test1tr" # load simulation and extract tdis - sim = MFSimulation.load( - sim_name=sim_name, sim_ws=example_data_path / "mf2005_test" - ) + sim = MFSimulation.load(sim_name=sim_name, sim_ws=example_data_path / "mf2005_test") tdis = sim.get_package("tdis") mf2005_model_path = example_data_path / sim_name diff --git a/autotest/test_compare.py b/autotest/test_compare.py index f96865cc3b..3a961243b2 100644 --- a/autotest/test_compare.py +++ b/autotest/test_compare.py @@ -26,9 +26,7 @@ def test_diffmax(): a1 = np.array([1, 2, 3]) a2 = np.array([4, 5, 7]) d, indices = _diffmax(a1, a2) - indices = indices[ - 0 - ] # return value is a tuple of arrays (1 for each dimension) + indices = indices[0] # return value is a 
tuple of arrays (1 for each dimension) assert d == 4 assert list(indices) == [2] @@ -37,9 +35,7 @@ def test_difftol(): a1 = np.array([1, 2, 3]) a2 = np.array([3, 5, 7]) d, indices = _difftol(a1, a2, 2.5) - indices = indices[ - 0 - ] # return value is a tuple of arrays (1 for each dimension) + indices = indices[0] # return value is a tuple of arrays (1 for each dimension) assert d == 4 print(d, indices) assert list(indices) == [1, 2] @@ -123,9 +119,7 @@ def comparison_model_1(function_tmpdir): m.remove_package("WEL") # recreate well package with binary output - wel = ModflowWel( - m, stress_period_data=wel_data, binary=True, dtype=wd.dtype - ) + wel = ModflowWel(m, stress_period_data=wel_data, binary=True, dtype=wd.dtype) m.write_input() diff --git a/autotest/test_dis_cases.py b/autotest/test_dis_cases.py index ae6ba62e96..41d527924d 100644 --- a/autotest/test_dis_cases.py +++ b/autotest/test_dis_cases.py @@ -62,9 +62,7 @@ def get_vlist(i, j, nrow, ncol): [icpl, cellxy[icpl, 0], cellxy[icpl, 1], 4] + iverts[icpl] for icpl in range(ncpl) ] - vertices = [ - [ivert, verts[ivert, 0], verts[ivert, 1]] for ivert in range(nvert) - ] + vertices = [[ivert, verts[ivert, 0], verts[ivert, 1]] for ivert in range(nvert)] xorigin = 3000 yorigin = 1000 angrot = 10 diff --git a/autotest/test_export.py b/autotest/test_export.py index 4c56f0e091..25b7c022c9 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -128,12 +128,8 @@ def disu_sim(name, tmpdir, missing_arrays=False): gwf = ModflowGwf(sim, modelname=name, save_flows=True) dis = ModflowGwfdisu(gwf, **gridprops) - ic = ModflowGwfic( - gwf, strt=np.random.random_sample(gwf.modelgrid.nnodes) * 350 - ) - npf = ModflowGwfnpf( - gwf, k=np.random.random_sample(gwf.modelgrid.nnodes) * 10 - ) + ic = ModflowGwfic(gwf, strt=np.random.random_sample(gwf.modelgrid.nnodes) * 350) + npf = ModflowGwfnpf(gwf, k=np.random.random_sample(gwf.modelgrid.nnodes) * 10) return sim @@ -178,9 +174,7 @@ def unstructured_grid(example_data_path): @requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.parametrize("pathlike", (True, False)) -def test_output_helper_shapefile_export( - pathlike, function_tmpdir, example_data_path -): +def test_output_helper_shapefile_export(pathlike, function_tmpdir, example_data_path): ml = Modflow.load( "freyberg.nam", model_ws=str(example_data_path / "freyberg_multilayer_transient"), @@ -208,9 +202,7 @@ def test_freyberg_export(function_tmpdir, example_data_path): name = "freyberg" namfile = f"{name}.nam" ws = example_data_path / name - m = flopy.modflow.Modflow.load( - namfile, model_ws=ws, check=False, verbose=False - ) + m = flopy.modflow.Modflow.load(namfile, model_ws=ws, check=False, verbose=False) # test export at model, package and object levels shpfile_path = function_tmpdir / "model.shp" @@ -249,9 +241,7 @@ def test_freyberg_export(function_tmpdir, example_data_path): part.unlink() assert not shape.with_suffix(".prj").exists() - m.modelgrid = StructuredGrid( - delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070 - ) + m.modelgrid = StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070) # test export with a modelgrid, regardless of whether or not wkt was found m.drn.stress_period_data.export(shape, sparse=True) for suffix in [".dbf", ".prj", ".shp", ".shx"]: @@ -259,9 +249,7 @@ def test_freyberg_export(function_tmpdir, example_data_path): assert part.exists() part.unlink() - m.modelgrid = StructuredGrid( - delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070 - ) + m.modelgrid = 
StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array, crs=3070) # verify that attributes have same modelgrid as parent assert m.drn.stress_period_data.mg.crs == m.modelgrid.crs assert m.drn.stress_period_data.mg.xoffset == m.modelgrid.xoffset @@ -320,17 +308,13 @@ def test_disu_export(function_tmpdir, missing_arrays): @pytest.mark.parametrize("crs", (None, 26916)) @requires_pkg("netCDF4", "pyproj") def test_export_output(crs, function_tmpdir, example_data_path): - ml = Modflow.load( - "freyberg.nam", model_ws=str(example_data_path / "freyberg") - ) + ml = Modflow.load("freyberg.nam", model_ws=str(example_data_path / "freyberg")) ml.modelgrid.crs = crs hds_pth = os.path.join(ml.model_ws, "freyberg.githds") hds = flopy.utils.HeadFile(hds_pth) out_pth = function_tmpdir / f"freyberg_{crs}.out.nc" - nc = flopy.export.utils.output_helper( - out_pth, ml, {"freyberg.githds": hds} - ) + nc = flopy.export.utils.output_helper(out_pth, ml, {"freyberg.githds": hds}) var = nc.nc.variables.get("head") arr = var[:] ibound_mask = ml.bas6.ibound.array == 0 @@ -393,9 +377,7 @@ def test_export_shapefile_polygon_closed(function_tmpdir): m = flopy.modflow.Modflow("test.nam", crs="EPSG:32614", xll=xll, yll=yll) - flopy.modflow.ModflowDis( - m, delr=spacing, delc=spacing, nrow=nrow, ncol=ncol - ) + flopy.modflow.ModflowDis(m, delr=spacing, delc=spacing, nrow=nrow, ncol=ncol) shp_file = os.path.join(function_tmpdir, "test_polygon.shp") m.dis.export(shp_file) @@ -448,8 +430,7 @@ def test_export_array(function_tmpdir, example_data_path): if "cellsize" in line.lower(): val = float(line.strip().split()[-1]) rot_cellsize = ( - np.cos(np.radians(m.modelgrid.angrot)) - * m.modelgrid.delr[0] + np.cos(np.radians(m.modelgrid.angrot)) * m.modelgrid.delr[0] ) break @@ -617,9 +598,7 @@ def test_export_array2(function_tmpdir): crs = 4431 # no epsg code - modelgrid = StructuredGrid( - delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1 - ) + modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1) filename = os.path.join(function_tmpdir, "myarray1.shp") a = np.arange(nrow * ncol).reshape((nrow, ncol)) export_array(modelgrid, filename, a) @@ -635,9 +614,7 @@ def test_export_array2(function_tmpdir): assert os.path.isfile(filename), "did not create array shapefile" # with passing in epsg code - modelgrid = StructuredGrid( - delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1 - ) + modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1) filename = os.path.join(function_tmpdir, "myarray3.shp") a = np.arange(nrow * ncol).reshape((nrow, ncol)) export_array(modelgrid, filename, a, crs=crs) @@ -710,9 +687,7 @@ def test_export_array_contours_structured(function_tmpdir): crs = 4431 # no epsg code - modelgrid = StructuredGrid( - delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1 - ) + modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1) filename = function_tmpdir / "myarraycontours1.shp" a = np.arange(nrow * ncol).reshape((nrow, ncol)) export_array_contours(modelgrid, filename, a) @@ -730,9 +705,7 @@ def test_export_array_contours_structured(function_tmpdir): assert os.path.isfile(filename), "did not create contour shapefile" # with passing in coordinate reference - modelgrid = StructuredGrid( - delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1 - ) + modelgrid = StructuredGrid(delr=np.ones(ncol) * 1.1, delc=np.ones(nrow) * 1.1) filename = function_tmpdir / "myarraycontours3.shp" a = np.arange(nrow * ncol).reshape((nrow, ncol)) export_array_contours(modelgrid, 
filename, a, crs=crs) @@ -740,9 +713,7 @@ def test_export_array_contours_structured(function_tmpdir): @requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) -def test_export_array_contours_unstructured( - function_tmpdir, unstructured_grid -): +def test_export_array_contours_unstructured(function_tmpdir, unstructured_grid): from shapefile import Reader grid = unstructured_grid @@ -824,9 +795,7 @@ def test_export_contourf(function_tmpdir, example_data_path): with Reader(filename) as r: shapes = r.shapes() # expect 65 with standard mpl contours (structured grids), 86 with tricontours - assert ( - len(shapes) >= 65 - ), "multipolygons were skipped in contourf routine" + assert len(shapes) >= 65, "multipolygons were skipped in contourf routine" # debugging # for s in shapes: @@ -850,9 +819,7 @@ def test_export_contours(function_tmpdir, example_data_path): levels = np.arange(10, 30, 0.5) mapview = flopy.plot.PlotMapView(model=ml) - contour_set = mapview.contour_array( - head, masked_values=[999.0], levels=levels - ) + contour_set = mapview.contour_array(head, masked_values=[999.0], levels=levels) export_contours(filename, contour_set) plt.close() @@ -940,17 +907,13 @@ def test_export_mf6_shp(function_tmpdir): tdis = flopy.mf6.modflow.mftdis.ModflowTdis( sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=perioddata ) - gwf = flopy.mf6.ModflowGwf( - sim, modelname=mf6name, model_nam_file=f"{mf6name}.nam" - ) + gwf = flopy.mf6.ModflowGwf(sim, modelname=mf6name, model_nam_file=f"{mf6name}.nam") dis6 = flopy.mf6.ModflowGwfdis( gwf, pname="dis", nlay=nlay, nrow=nrow, ncol=ncol, top=top, botm=botm ) # Riv6 - spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty( - gwf, maxbound=len(spd) - ) + spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty(gwf, maxbound=len(spd)) spd6[0]["cellid"] = list(zip(spd.k, spd.i, spd.j)) for c in spd.dtype.names: if c in spd6[0].dtype.names: @@ -1031,9 +994,7 @@ def test_export_huge_shapefile(function_tmpdir): tsmult = 1 botm = np.zeros((nlay, nrow, ncol)) - m = flopy.modflow.Modflow( - "junk", version="mfnwt", model_ws=function_tmpdir - ) + m = flopy.modflow.Modflow("junk", version="mfnwt", model_ws=function_tmpdir) flopy.modflow.ModflowDis( m, nlay=nlay, @@ -1201,9 +1162,7 @@ def test_vtk_export_array2d(function_tmpdir, example_data_path): # test mf 2005 freyberg mpath = example_data_path / "freyberg_multilayer_transient" namfile = "freyberg.nam" - m = Modflow.load( - namfile, model_ws=mpath, verbose=False, load_only=["dis", "bas6"] - ) + m = Modflow.load(namfile, model_ws=mpath, verbose=False, load_only=["dis", "bas6"]) # export and check m.dis.top.export(function_tmpdir, name="top", fmt="vtk", binary=False) @@ -1362,17 +1321,13 @@ def test_vtk_binary_head_export(function_tmpdir, example_data_path): namfile = "freyberg.nam" hdsfile = mpth / "freyberg.hds" heads = HeadFile(hdsfile) - m = Modflow.load( - namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"] - ) + m = Modflow.load(namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"]) filetocheck = function_tmpdir / "freyberg_head_000003.vtu" # export and check vtkobj = Vtk(m, pvd=True, xml=True) - vtkobj.add_heads( - heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)] - ) + vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]) vtkobj.write(function_tmpdir / "freyberg_head") assert count_lines_in_file(filetocheck) == 34 @@ -1381,9 +1336,7 @@ def test_vtk_binary_head_export(function_tmpdir, example_data_path): # with point scalars 
vtkobj = Vtk(m, pvd=True, xml=True, point_scalars=True) - vtkobj.add_heads( - heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)] - ) + vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]) vtkobj.write(function_tmpdir / "freyberg_head") assert count_lines_in_file(filetocheck) == 34 @@ -1392,9 +1345,7 @@ def test_vtk_binary_head_export(function_tmpdir, example_data_path): # with smoothing vtkobj = Vtk(m, pvd=True, xml=True, smooth=True) - vtkobj.add_heads( - heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)] - ) + vtkobj.add_heads(heads, kstpkper=[(0, 0), (0, 199), (0, 354), (0, 454), (0, 1089)]) vtkobj.write(function_tmpdir / "freyberg_head") assert count_lines_in_file(filetocheck) == 34 @@ -1409,27 +1360,20 @@ def test_vtk_cbc(function_tmpdir, example_data_path): namfile = "freyberg.nam" cbcfile = os.path.join(mpth, "freyberg.cbc") cbc = CellBudgetFile(cbcfile) - m = Modflow.load( - namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"] - ) + m = Modflow.load(namfile, model_ws=mpth, verbose=False, load_only=["dis", "bas6"]) # export and check with point scalar vtkobj = Vtk(m, binary=False, xml=True, pvd=True, point_scalars=True) vtkobj.add_cell_budget(cbc, kstpkper=[(0, 0), (0, 1), (0, 2)]) vtkobj.write(function_tmpdir / "freyberg_CBC") - assert ( - count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu") - == 39243 - ) + assert count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu") == 39243 # with point scalars and binary vtkobj = Vtk(m, xml=True, pvd=True, point_scalars=True) vtkobj.add_cell_budget(cbc, kstpkper=[(0, 0), (0, 1), (0, 2)]) vtkobj.write(function_tmpdir / "freyberg_CBC") - assert ( - count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu") == 28 - ) + assert count_lines_in_file(function_tmpdir / "freyberg_CBC_000000.vtu") == 28 @requires_pkg("vtk") @@ -1492,9 +1436,7 @@ def test_vtk_unstructured(function_tmpdir, unstructured_grid): grid = unstructured_grid outfile = function_tmpdir / "disu_grid.vtu" - vtkobj = Vtk( - modelgrid=grid, vertical_exageration=2, binary=True, smooth=False - ) + vtkobj = Vtk(modelgrid=grid, vertical_exageration=2, binary=True, smooth=False) vtkobj.add_array(grid.top, "top") vtkobj.add_array(grid.botm, "botm") vtkobj.write(outfile) @@ -1510,9 +1452,7 @@ def test_vtk_unstructured(function_tmpdir, unstructured_grid): top2 = vtk_to_numpy(data.GetCellData().GetArray("top")) - assert np.allclose( - np.ravel(grid.top), top2 - ), "Field data not properly written" + assert np.allclose(np.ravel(grid.top), top2), "Field data not properly written" @requires_pkg("vtk", "pyvista") @@ -1616,9 +1556,7 @@ def test_vtk_pathline(function_tmpdir, example_data_path): prsity=0.2, prsityCB=0.2, ) - sim = mpp.create_mpsim( - trackdir="backward", simtype="pathline", packages="WEL" - ) + sim = mpp.create_mpsim(trackdir="backward", simtype="pathline", packages="WEL") mpp.write_input() mpp.run_model() @@ -1647,9 +1585,7 @@ def test_vtk_pathline(function_tmpdir, example_data_path): from vtkmodules.util import numpy_support totim = numpy_support.vtk_to_numpy(data.GetPointData().GetArray("time")) - pid = numpy_support.vtk_to_numpy( - data.GetPointData().GetArray("particleid") - ) + pid = numpy_support.vtk_to_numpy(data.GetPointData().GetArray("particleid")) maxtime = 0 for p in plines: @@ -1669,9 +1605,7 @@ def grid2disvgrid(nrow, ncol): def lower_left_point(i, j, ncol): return i * (ncol + 1) + j - mg = np.meshgrid( - np.linspace(0, ncol, ncol + 1), np.linspace(0, nrow, nrow + 1) - ) 
+ mg = np.meshgrid(np.linspace(0, ncol, ncol + 1), np.linspace(0, nrow, nrow + 1)) verts = np.vstack((mg[0].flatten(), mg[1].flatten())).transpose() # in the creation of iverts here, we intentionally do not close the cell polygon @@ -1688,9 +1622,7 @@ def lower_left_point(i, j, ncol): def load_verts(fname): - verts = np.genfromtxt( - fname, dtype=[int, float, float], names=["iv", "x", "y"] - ) + verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"]) verts["iv"] -= 1 # zero based return verts @@ -1727,9 +1659,7 @@ def test_vtk_add_model_without_packages_names(function_tmpdir): dis = ModflowGwfdis(gwf, nrow=3, ncol=3) ic = ModflowGwfic(gwf) npf = ModflowGwfnpf(gwf, save_specific_discharge=True) - chd = ModflowGwfchd( - gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 2, 2), 0.0]] - ) + chd = ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 2, 2), 0.0]]) # Export model without specifying packages_names parameter diff --git a/autotest/test_flopy_io.py b/autotest/test_flopy_io.py index bb09cd2207..9ee570d044 100644 --- a/autotest/test_flopy_io.py +++ b/autotest/test_flopy_io.py @@ -29,32 +29,22 @@ def test_relpath_safe(function_tmpdir, scrub, use_paths): and splitdrive(function_tmpdir)[0] != splitdrive(getcwd())[0] ): if use_paths: - assert ( - Path(relpath_safe(function_tmpdir)) - == function_tmpdir.absolute() - ) + assert Path(relpath_safe(function_tmpdir)) == function_tmpdir.absolute() assert relpath_safe(Path(which("mf6"))) == str( Path(which("mf6")).absolute() ) else: assert ( - Path(relpath_safe(str(function_tmpdir))) - == function_tmpdir.absolute() - ) - assert relpath_safe(which("mf6")) == str( - Path(which("mf6")).absolute() + Path(relpath_safe(str(function_tmpdir))) == function_tmpdir.absolute() ) + assert relpath_safe(which("mf6")) == str(Path(which("mf6")).absolute()) else: if use_paths: - assert Path( - relpath_safe(function_tmpdir, function_tmpdir.parent) - ) == Path(function_tmpdir.name) + assert Path(relpath_safe(function_tmpdir, function_tmpdir.parent)) == Path( + function_tmpdir.name + ) assert ( - Path( - relpath_safe( - function_tmpdir, function_tmpdir.parent.parent - ) - ) + Path(relpath_safe(function_tmpdir, function_tmpdir.parent.parent)) == Path(function_tmpdir.parent.name) / function_tmpdir.name ) assert relpath_safe(Path(which("mf6"))) == relpath( @@ -73,9 +63,7 @@ def test_relpath_safe(function_tmpdir, scrub, use_paths): ) == Path(function_tmpdir.parent.name) / function_tmpdir.name ) - assert relpath_safe(which("mf6")) == relpath( - which("mf6"), getcwd() - ) + assert relpath_safe(which("mf6")) == relpath(which("mf6"), getcwd()) # test user login obfuscation with set_dir("/"): diff --git a/autotest/test_flopy_module.py b/autotest/test_flopy_module.py index 1a4bd40ea3..1114b96009 100644 --- a/autotest/test_flopy_module.py +++ b/autotest/test_flopy_module.py @@ -58,9 +58,7 @@ def test_modflow_unstructured(function_tmpdir): wel = flopy.mfusg.MfUsgWel(mf, stress_period_data={0: [[0, -100]]}) assert isinstance(wel, flopy.mfusg.MfUsgWel) - ghb = flopy.modflow.ModflowGhb( - mf, stress_period_data={0: [[1, 5.9, 1000.0]]} - ) + ghb = flopy.modflow.ModflowGhb(mf, stress_period_data={0: [[1, 5.9, 1000.0]]}) assert isinstance(ghb, flopy.modflow.ModflowGhb) oc = flopy.modflow.ModflowOc(mf) @@ -141,9 +139,7 @@ def test_mflist_reference(function_tmpdir): # assert shp.numRecords == nrow * ncol -def test_pyinstaller_flopy_runs_without_dfn_folder( - flopy_data_path, example_data_path -): +def 
test_pyinstaller_flopy_runs_without_dfn_folder(flopy_data_path, example_data_path): """ Test to ensure that flopy can load a modflow 6 simulation without dfn files being present. diff --git a/autotest/test_gage.py b/autotest/test_gage.py index 04d3879b06..93d1c0a168 100644 --- a/autotest/test_gage.py +++ b/autotest/test_gage.py @@ -121,9 +121,7 @@ def test_gage_files(function_tmpdir): break assert found, f"{f} not in name file entries" iu = abs(gages[idx][1]) - assert ( - iu == iun - ), f"{f} unit not equal to {iu} - name file unit = {iun}" + assert iu == iun, f"{f} unit not equal to {iu} - name file unit = {iun}" def test_gage_filenames0(function_tmpdir): @@ -207,6 +205,4 @@ def test_gage_filenames(function_tmpdir): break assert found, f"{f} not in name file entries" iu = abs(gages[idx][1]) - assert ( - iu == iun - ), f"{f} unit not equal to {iu} - name file unit = {iun}" + assert iu == iun, f"{f} unit not equal to {iu} - name file unit = {iun}" diff --git a/autotest/test_geospatial_util.py b/autotest/test_geospatial_util.py index 9132e1d1be..aa69493ef1 100644 --- a/autotest/test_geospatial_util.py +++ b/autotest/test_geospatial_util.py @@ -422,9 +422,7 @@ def test_point_collection(point, multipoint): is_equal = gi == gi1[ix] if not is_equal: - raise AssertionError( - "GeoSpatialCollection Point conversion error" - ) + raise AssertionError("GeoSpatialCollection Point conversion error") @requires_pkg("shapely", "geojson", "geopandas") @@ -452,9 +450,7 @@ def test_linestring_collection(linestring, multilinestring): is_equal = gi == gi1[ix] if not is_equal: - raise AssertionError( - "GeoSpatialCollection Linestring conversion error" - ) + raise AssertionError("GeoSpatialCollection Linestring conversion error") @requires_pkg("shapely", "geojson", "geopandas") diff --git a/autotest/test_get_modflow.py b/autotest/test_get_modflow.py index d5dc4d6791..58d8e27d94 100644 --- a/autotest/test_get_modflow.py +++ b/autotest/test_get_modflow.py @@ -23,8 +23,7 @@ "flopy": Path(expandvars(r"%LOCALAPPDATA%\flopy")) / "bin" if system() == "Windows" else Path.home() / ".local" / "share" / "flopy" / "bin", - "python": Path(sys.prefix) - / ("Scripts" if system() == "Windows" else "bin"), + "python": Path(sys.prefix) / ("Scripts" if system() == "Windows" else "bin"), "home": Path.home() / ".local" / "bin", } owner_options = [ @@ -128,9 +127,7 @@ def test_get_release(repo): } else: for ostag in expected_ostags: - assert any( - ostag in a for a in actual_assets - ), f"dist not found for {ostag}" + assert any(ostag in a for a in actual_assets), f"dist not found for {ostag}" @pytest.mark.parametrize("bindir", bindir_options.keys()) @@ -276,9 +273,7 @@ def test_script(function_tmpdir, owner, repo, downloads_dir): def test_python_api(function_tmpdir, owner, repo, downloads_dir): bindir = str(function_tmpdir) try: - get_modflow( - bindir, owner=owner, repo=repo, downloads_dir=downloads_dir - ) + get_modflow(bindir, owner=owner, repo=repo, downloads_dir=downloads_dir) except HTTPError as err: if err.code == 403: pytest.skip(f"GitHub {rate_limit_msg}") diff --git a/autotest/test_grid.py b/autotest/test_grid.py index f5eba2f233..0cec2fa2c1 100644 --- a/autotest/test_grid.py +++ b/autotest/test_grid.py @@ -87,9 +87,7 @@ def test_rotation(): mg2 = StructuredGrid(delc=m.dis.delc.array, delr=m.dis.delr.array) mg2._angrot = -45.0 - mg2.set_coord_info( - mg2._xul_to_xll(xul), mg2._yul_to_yll(yul), angrot=-45.0 - ) + mg2.set_coord_info(mg2._xul_to_xll(xul), mg2._yul_to_yll(yul), angrot=-45.0) xll2, yll2 = mg2.xoffset, 
mg2.yoffset assert np.abs(mg2.xvertices[0, 0] - xul) < 1e-4 @@ -178,9 +176,7 @@ def test_get_lrc_get_node(): nlay, nrow, ncol = 3, 4, 5 nnodes = nlay * nrow * ncol ml = Modflow() - dis = ModflowDis( - ml, nlay=nlay, nrow=nrow, ncol=ncol, top=50, botm=[0, -1, -2] - ) + dis = ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, top=50, botm=[0, -1, -2]) nodes = list(range(nnodes)) indices = np.indices((nlay, nrow, ncol)) layers = indices[0].flatten() @@ -236,9 +232,7 @@ def test_get_rc_from_node_coordinates(): def load_verts(fname): - verts = np.genfromtxt( - fname, dtype=[int, float, float], names=["iv", "x", "y"] - ) + verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"]) verts["iv"] -= 1 # zero based return verts @@ -303,16 +297,12 @@ def test_intersection(dis_model, disv_model): else: print("In real_world coordinates:") try: - row, col = dis_model.modelgrid.intersect( - x, y, local=local, forgive=forgive - ) + row, col = dis_model.modelgrid.intersect(x, y, local=local, forgive=forgive) cell2d_disv = disv_model.modelgrid.intersect( x, y, local=local, forgive=forgive ) except Exception as e: - if not forgive and any( - ["outside of the model area" in k for k in e.args] - ): + if not forgive and any(["outside of the model area" in k for k in e.args]): pass else: # should be forgiving x,y out of grid raise e @@ -352,9 +342,7 @@ def test_structured_xyz_intersect(example_data_path): def test_vertex_xyz_intersect(example_data_path): - sim = MFSimulation.load( - sim_ws=example_data_path / "mf6" / "test003_gwfs_disv" - ) + sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwfs_disv") ml = sim.get_model(list(sim.model_names)[0]) mg = ml.modelgrid @@ -457,21 +445,16 @@ def test_structured_from_gridspec(example_data_path, spc_file): ), errmsg ncpl = modelgrid.ncol * modelgrid.nrow - assert ( - modelgrid.ncpl == ncpl - ), f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}" + assert modelgrid.ncpl == ncpl, f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}" nvert = modelgrid.nvert iverts = modelgrid.iverts maxvertex = max([max(sublist[1:]) for sublist in iverts]) - assert ( - maxvertex + 1 == nvert - ), f"nvert ({maxvertex + 1}) does not equal {nvert}" + assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}" verts = modelgrid.verts - assert nvert == verts.shape[0], ( - f"number of vertex (x, y) pairs ({verts.shape[0]}) " - f"does not equal {nvert}" - ) + assert ( + nvert == verts.shape[0] + ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}" @requires_pkg("shapely") @@ -485,17 +468,13 @@ def test_unstructured_from_argus_mesh(example_data_path): print(f" Number of nodes: {g.nnodes}") -def test_unstructured_from_verts_and_iverts( - function_tmpdir, example_data_path -): +def test_unstructured_from_verts_and_iverts(function_tmpdir, example_data_path): datapth = example_data_path / "unstructured" # simple functions to load vertices and incidence lists def load_verts(fname): print(f"Loading vertices from: {fname}") - verts = np.genfromtxt( - fname, dtype=[int, float, float], names=["iv", "x", "y"] - ) + verts = np.genfromtxt(fname, dtype=[int, float, float], names=["iv", "x", "y"]) verts["iv"] -= 1 # zero based return verts @@ -553,8 +532,7 @@ def unstructured_from_gridspec_driver(example_data_path, gsf_file): # check vertices expected_verts = [ - (float(s[0]), float(s[1]), float(s[2])) - for s in split[3 : (3 + nverts)] + (float(s[0]), float(s[1]), float(s[2])) for s in split[3 : (3 + nverts)] ] for i, ev in 
enumerate(expected_verts[:10]): assert grid.verts[i][0] == ev[0] @@ -608,9 +586,7 @@ def test_unstructured_from_gridspec_comments(example_data_path): pytest.param(4269, None, marks=pytest.mark.xfail), ), ) -def test_grid_crs( - minimal_unstructured_grid_info, crs, expected_srs, function_tmpdir -): +def test_grid_crs(minimal_unstructured_grid_info, crs, expected_srs, function_tmpdir): expected_epsg = None if match := re.findall(r"epsg:([\d]+)", expected_srs or "", re.IGNORECASE): expected_epsg = int(match[0]) @@ -636,9 +612,7 @@ def do_checks(g): do_checks(VertexGrid(vertices=d["vertices"], crs=crs)) # only check deprecations if pyproj is available - pyproj_avail_context = ( - pytest.deprecated_call() if HAS_PYPROJ else nullcontext() - ) + pyproj_avail_context = pytest.deprecated_call() if HAS_PYPROJ else nullcontext() # test deprecated 'epsg' parameter if isinstance(crs, int): @@ -722,9 +696,7 @@ def do_checks(g, *, exp_srs=expected_srs, exp_epsg=expected_epsg): do_checks(sg, exp_srs="EPSG:26915", exp_epsg=26915) # only check deprecations if pyproj is available - pyproj_avail_context = ( - pytest.deprecated_call() if HAS_PYPROJ else nullcontext() - ) + pyproj_avail_context = pytest.deprecated_call() if HAS_PYPROJ else nullcontext() # test deprecated 'epsg' parameter if isinstance(crs, int): @@ -923,9 +895,7 @@ def test_tocvfd3(): bt = -100.0 * np.ones((nlay, nrow, ncol)) idomain = np.ones((nlay, nrow, ncol)) idomain[:, 2:5, 2:5] = 0 - sg1 = StructuredGrid( - delr=delr, delc=delc, top=tp, botm=bt, idomain=idomain - ) + sg1 = StructuredGrid(delr=delr, delc=delc, top=tp, botm=bt, idomain=idomain) # inner grid nlay = 1 nrow = ncol = 9 @@ -979,9 +949,7 @@ def test_area_centroid_polygon(): xc, yc = centroid_of_polygon(pts) result = np.array([xc, yc]) answer = np.array((685055.1035824707, 6295543.12059913)) - assert np.allclose( - result, answer - ), "cvfdutil centroid of polygon incorrect" + assert np.allclose(result, answer), "cvfdutil centroid of polygon incorrect" x, y = list(zip(*pts)) result = area_of_polygon(x, y) answer = 11.228131838368032 @@ -1035,9 +1003,7 @@ def test_unstructured_minimal_grid_ctor(minimal_unstructured_grid_info): [(2.0, 1), (2.0, 0.0)], [(2.0, 0), (1.0, 0.0)], ] - assert ( - g.grid_lines == grid_lines - ), f"\n{g.grid_lines} \n /= \n{grid_lines}" + assert g.grid_lines == grid_lines, f"\n{g.grid_lines} \n /= \n{grid_lines}" assert g.extent == (0, 2, 0, 1) xv, yv, zv = g.xyzvertices assert xv == [[0, 1, 1, 0], [1, 2, 2, 1]] @@ -1082,9 +1048,7 @@ def test_unstructured_complete_grid_ctor(minimal_unstructured_grid_info): ], } assert isinstance(g.grid_lines, dict) - assert ( - g.grid_lines == grid_lines - ), f"\n{g.grid_lines} \n /= \n{grid_lines}" + assert g.grid_lines == grid_lines, f"\n{g.grid_lines} \n /= \n{grid_lines}" assert g.extent == (0, 2, 0, 1) xv, yv, zv = g.xyzvertices assert xv == [[0, 1, 1, 0], [1, 2, 2, 1]] @@ -1172,11 +1136,7 @@ def test_voronoi_vertex_grid(function_tmpdir): ), ) def test_voronoi_grid(request, function_tmpdir, grid_info): - name = ( - request.node.name.replace("/", "_") - .replace("\\", "_") - .replace(":", "_") - ) + name = request.node.name.replace("/", "_").replace("\\", "_").replace(":", "_") ncpl, vor, gridprops, grid = grid_info # TODO: debug off-by-3 issue @@ -1228,9 +1188,7 @@ def test_structured_thickness(structured_grid): thickness = structured_grid.cell_thickness assert np.allclose(thickness, 5.0), "thicknesses != 5." 
- sat_thick = structured_grid.saturated_thickness( - structured_grid.botm + 10.0 - ) + sat_thick = structured_grid.saturated_thickness(structured_grid.botm + 10.0) assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5." sat_thick = structured_grid.saturated_thickness(structured_grid.botm + 5.0) @@ -1242,9 +1200,7 @@ def test_structured_thickness(structured_grid): sat_thick = structured_grid.saturated_thickness(structured_grid.botm) assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0." - sat_thick = structured_grid.saturated_thickness( - structured_grid.botm - 100.0 - ) + sat_thick = structured_grid.saturated_thickness(structured_grid.botm - 100.0) assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0." @@ -1272,27 +1228,19 @@ def test_unstructured_thickness(unstructured_grid): thickness = unstructured_grid.cell_thickness assert np.allclose(thickness, 5.0), "thicknesses != 5." - sat_thick = unstructured_grid.saturated_thickness( - unstructured_grid.botm + 10.0 - ) + sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 10.0) assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5." - sat_thick = unstructured_grid.saturated_thickness( - unstructured_grid.botm + 5.0 - ) + sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 5.0) assert np.allclose(sat_thick, thickness), "saturated thicknesses != 5." - sat_thick = unstructured_grid.saturated_thickness( - unstructured_grid.botm + 2.5 - ) + sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm + 2.5) assert np.allclose(sat_thick, 2.5), "saturated thicknesses != 2.5" sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm) assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0." - sat_thick = unstructured_grid.saturated_thickness( - unstructured_grid.botm - 100.0 - ) + sat_thick = unstructured_grid.saturated_thickness(unstructured_grid.botm - 100.0) assert np.allclose(sat_thick, 0.0), "saturated thicknesses != 0." 
@@ -1316,9 +1264,7 @@ def test_unstructured_neighbors(unstructured_grid): rook_neighbors = unstructured_grid.neighbors(5) assert np.allclose(rook_neighbors, [0, 10, 1, 6, 11, 2, 7, 12]) - queen_neighbors = unstructured_grid.neighbors( - 5, method="queen", reset=True - ) + queen_neighbors = unstructured_grid.neighbors(5, method="queen", reset=True) assert np.allclose(queen_neighbors, [0, 10, 1, 6, 11, 2, 3, 7, 8, 12, 13]) @@ -1331,9 +1277,7 @@ def test_structured_ncb_thickness(): ), "grid cell_thickness attribute returns incorrect shape" thickness = grid.remove_confining_beds(grid.cell_thickness) - assert ( - thickness.shape == grid.shape - ), "quasi3d confining beds not properly removed" + assert thickness.shape == grid.shape, "quasi3d confining beds not properly removed" sat_thick = grid.saturated_thickness(grid.cell_thickness) assert ( @@ -1447,9 +1391,7 @@ def test_geo_dataframe(structured_grid, vertex_grid, unstructured_grid): cv = grid.get_cell_vertices(node) for coord in coords: if coord not in cv: - raise AssertionError( - f"Cell vertices incorrect for node={node}" - ) + raise AssertionError(f"Cell vertices incorrect for node={node}") def test_unstructured_iverts_cleanup(): @@ -1508,6 +1450,4 @@ def test_unstructured_iverts_cleanup(): clean_ugrid = ugrid.clean_iverts() if clean_ugrid.nvert != cleaned_vert_num: - raise AssertionError( - "Improper number of vertices for cleaned 'shared' iverts" - ) + raise AssertionError("Improper number of vertices for cleaned 'shared' iverts") diff --git a/autotest/test_grid_cases.py b/autotest/test_grid_cases.py index 5f3e748e21..bb8d7eb97b 100644 --- a/autotest/test_grid_cases.py +++ b/autotest/test_grid_cases.py @@ -264,9 +264,7 @@ def voronoi_rectangle(): xmax = 2.0 ymin = 0.0 ymax = 1.0 - poly = np.array( - ((xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)) - ) + poly = np.array(((xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax))) max_area = 0.001 angle = 30 @@ -351,9 +349,7 @@ def voronoi_polygons(): tri.add_polygon(active_domain) tri.add_polygon(area1) tri.add_polygon(area2) - tri.add_region( - (1, 1), 0, maximum_area=100 - ) # point inside active domain + tri.add_region((1, 1), 0, maximum_area=100) # point inside active domain tri.add_region((11, 11), 1, maximum_area=10) # point inside area1 tri.add_region((61, 61), 2, maximum_area=3) # point inside area2 tri.build(verbose=False) @@ -400,9 +396,7 @@ def voronoi_many_polygons(): # then regions and other polygons should follow tri.add_polygon(area1) tri.add_polygon(area2) - tri.add_region( - (1, 1), 0, maximum_area=100 - ) # point inside active domain + tri.add_region((1, 1), 0, maximum_area=100) # point inside active domain tri.add_region((11, 11), 1, maximum_area=10) # point inside area1 tri.add_region((70, 70), 2, maximum_area=1) # point inside area2 diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py index 12a8d3f2c0..fbd7d8ad74 100644 --- a/autotest/test_gridgen.py +++ b/autotest/test_gridgen.py @@ -160,9 +160,7 @@ def test_mf6disv(function_tmpdir): botm = [top - k * dz for k in range(1, nlay + 1)] # Create a dummy model and regular grid to use as the base grid for gridgen - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") gwf = flopy.mf6.ModflowGwf(sim, modelname=name) dis = flopy.mf6.ModflowGwfdis( @@ -193,17 +191,13 @@ def test_mf6disv(function_tmpdir): # build run and post-process the MODFLOW 6 model name = "mymodel" - sim = 
flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") tdis = flopy.mf6.ModflowTdis(sim) ims = flopy.mf6.ModflowIms(sim, linear_acceleration="bicgstab") gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops) ic = flopy.mf6.ModflowGwfic(gwf) - npf = flopy.mf6.ModflowGwfnpf( - gwf, xt3doptions=True, save_specific_discharge=True - ) + npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True) chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd) budget_file = f"{name}.bud" head_file = f"{name}.hds" @@ -297,9 +291,7 @@ def sim_disu_diff_layers(function_tmpdir): botm = [top - k * dz for k in range(1, nlay + 1)] # Create a dummy model and regular grid to use as the base grid for gridgen - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") gwf = flopy.mf6.ModflowGwf(sim, modelname=name) dis = flopy.mf6.ModflowGwfdis( @@ -328,17 +320,13 @@ def sim_disu_diff_layers(function_tmpdir): # build run and post-process the MODFLOW 6 model name = "mymodel" - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") tdis = flopy.mf6.ModflowTdis(sim) ims = flopy.mf6.ModflowIms(sim, linear_acceleration="bicgstab") gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) disu = flopy.mf6.ModflowGwfdisu(gwf, **disu_gridprops) ic = flopy.mf6.ModflowGwfic(gwf) - npf = flopy.mf6.ModflowGwfnpf( - gwf, xt3doptions=True, save_specific_discharge=True - ) + npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True) chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd) budget_file = f"{name}.bud" head_file = f"{name}.hds" @@ -423,9 +411,7 @@ def test_mf6disu(sim_disu_diff_layers): raise AssertionError("Boundary condition was not drawn") for col in ax.collections: - if not isinstance( - col, (QuadMesh, PathCollection, LineCollection) - ): + if not isinstance(col, (QuadMesh, PathCollection, LineCollection)): raise AssertionError("Unexpected collection type") plt.close() @@ -546,9 +532,7 @@ def test_mfusg(function_tmpdir): ax.set_aspect("equal") pmv.plot_array(head[ilay], cmap="jet", vmin=vmin, vmax=vmax) pmv.plot_grid(colors="k", alpha=0.1) - pmv.contour_array( - head[ilay], levels=[0.2, 0.4, 0.6, 0.8], linewidths=3.0 - ) + pmv.contour_array(head[ilay], levels=[0.2, 0.4, 0.6, 0.8], linewidths=3.0) ax.set_title(f"Layer {ilay + 1}") # pmv.plot_specific_discharge(spdis, color='white') fname = "results.png" @@ -574,9 +558,7 @@ def test_mfusg(function_tmpdir): raise AssertionError("Boundary condition was not drawn") for col in ax.collections: - if not isinstance( - col, (QuadMesh, PathCollection, LineCollection) - ): + if not isinstance(col, (QuadMesh, PathCollection, LineCollection)): raise AssertionError("Unexpected collection type") plt.close() @@ -587,13 +569,10 @@ def test_mfusg(function_tmpdir): m.run_model() # also test load of unstructured LPF with keywords - lpf2 = flopy.mfusg.MfUsgLpf.load( - function_tmpdir / f"{name}.lpf", m, check=False - ) + lpf2 = flopy.mfusg.MfUsgLpf.load(function_tmpdir / f"{name}.lpf", m, check=False) msg = "NOCVCORRECTION and NOVFC should be in lpf options but at least one is not." 
assert ( - "NOVFC" in lpf2.options.upper() - and "NOCVCORRECTION" in lpf2.options.upper() + "NOVFC" in lpf2.options.upper() and "NOCVCORRECTION" in lpf2.options.upper() ), msg # test disu, bas6, lpf shapefile export for mfusg unstructured models @@ -768,9 +747,7 @@ def test_gridgen(function_tmpdir): points = [(4750.0, 5250.0)] cells = g.intersect(points, "point", 0) n = cells["nodenumber"][0] - msg = ( - f"gridgen point intersect did not identify the correct cell {n} <> 308" - ) + msg = f"gridgen point intersect did not identify the correct cell {n} <> 308" assert n == 308, msg # test the gridgen line intersection @@ -800,9 +777,8 @@ def test_gridgen(function_tmpdir): 455, 384, ] - msg = ( - "gridgen line intersect did not identify the correct " - "cells {} <> {}".format(nlist, nlist2) + msg = "gridgen line intersect did not identify the correct cells {} <> {}".format( + nlist, nlist2 ) assert nlist == nlist2, msg @@ -826,10 +802,7 @@ def test_gridgen(function_tmpdir): "be (with vertical pass through activated)." ) assert ( - len( - ja0[(ja0 > disu_vp.nodelay[0]) & (ja0 <= sum(disu_vp.nodelay[:2]))] - ) - == 0 + len(ja0[(ja0 > disu_vp.nodelay[0]) & (ja0 <= sum(disu_vp.nodelay[:2]))]) == 0 ), msg # test mfusg without vertical pass-through @@ -864,9 +837,7 @@ def test_flopy_issue_1492(function_tmpdir): botm = [top - k * dz for k in range(1, nlay + 1)] # Create a dummy model and regular grid to use as the base grid for gridgen - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") gwf = flopy.mf6.ModflowGwf(sim, modelname=name) dis = flopy.mf6.ModflowGwfdis( gwf, @@ -908,9 +879,7 @@ def test_flopy_issue_1492(function_tmpdir): gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) disv = flopy.mf6.ModflowGwfdisv(gwf, **disv_gridprops) ic = flopy.mf6.ModflowGwfic(gwf) - npf = flopy.mf6.ModflowGwfnpf( - gwf, xt3doptions=True, save_specific_discharge=True - ) + npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=True, save_specific_discharge=True) chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd) budget_file = name + ".bud" head_file = name + ".hds" diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py index 0fccfb4e57..1bc26acc85 100644 --- a/autotest/test_gridintersect.py +++ b/autotest/test_gridintersect.py @@ -472,9 +472,7 @@ def test_rect_grid_linestring_in_and_out_of_cell2(): # TODO: remove in 3.10.0 gr = get_rect_grid() ix = GridIntersect(gr, method="structured") - result = ix.intersect( - LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]) - ) + result = ix.intersect(LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)])) assert len(result) == 3 @@ -601,9 +599,7 @@ def test_rect_grid_linestring_in_and_out_of_cell_shapely(rtree): def test_rect_grid_linestring_in_and_out_of_cell2_shapely(): gr = get_rect_grid() ix = GridIntersect(gr, method="vertex") - result = ix.intersect( - LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]) - ) + result = ix.intersect(LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)])) assert len(result) == 3 @@ -812,9 +808,7 @@ def test_rect_grid_polygon_in_2cells(): # TODO: remove in 3.10.0 gr = get_rect_grid() ix = GridIntersect(gr, method="structured") - result = ix.intersect( - Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)]) - ) + result = ix.intersect(Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)])) assert len(result) == 2 assert result.areas.sum() == 50.0 @@ 
-854,9 +848,7 @@ def test_rect_grid_polygon_on_inner_boundary(): # TODO: remove in 3.10.0 gr = get_rect_grid() ix = GridIntersect(gr, method="structured") - result = ix.intersect( - Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]) - ) + result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])) assert len(result) == 2 assert result.areas.sum() == 50.0 @@ -1046,9 +1038,7 @@ def test_rect_grid_polygon_outside_shapely(rtree): def test_rect_grid_polygon_in_2cells_shapely(rtree): gr = get_rect_grid() ix = GridIntersect(gr, method="vertex", rtree=rtree) - result = ix.intersect( - Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)]) - ) + result = ix.intersect(Polygon([(2.5, 5.0), (7.5, 5.0), (7.5, 15.0), (2.5, 15.0)])) assert len(result) == 2 assert result.areas.sum() == 50.0 @@ -1087,9 +1077,7 @@ def test_rect_grid_polygon_running_along_boundary_shapely(): def test_rect_grid_polygon_on_inner_boundary_shapely(rtree): gr = get_rect_grid() ix = GridIntersect(gr, method="vertex", rtree=rtree) - result = ix.intersect( - Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]) - ) + result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])) assert len(result) == 2 assert result.areas.sum() == 50.0 @@ -1172,9 +1160,7 @@ def test_tri_grid_polygon_in_2cells(rtree): if gr == -1: return ix = GridIntersect(gr, rtree=rtree) - result = ix.intersect( - Polygon([(2.5, 5.0), (5.0, 5.0), (5.0, 15.0), (2.5, 15.0)]) - ) + result = ix.intersect(Polygon([(2.5, 5.0), (5.0, 5.0), (5.0, 15.0), (2.5, 15.0)])) assert len(result) == 2 assert result.areas.sum() == 25.0 @@ -1199,9 +1185,7 @@ def test_tri_grid_polygon_on_inner_boundary(rtree): if gr == -1: return ix = GridIntersect(gr, rtree=rtree) - result = ix.intersect( - Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)]) - ) + result = ix.intersect(Polygon([(5.0, 10.0), (15.0, 10.0), (15.0, 5.0), (5.0, 5.0)])) assert len(result) == 4 assert result.areas.sum() == 50.0 diff --git a/autotest/test_headufile.py b/autotest/test_headufile.py index e00f5106c8..0bb807a48c 100644 --- a/autotest/test_headufile.py +++ b/autotest/test_headufile.py @@ -96,9 +96,7 @@ def test_get_ts_single_node(mfusg_model): # test if single node idx works one_hds = head_file.get_ts(idx=300) - assert ( - one_hds[0, 1] == head[0][300] - ), "head from 'get_ts' != head from 'get_data'" + assert one_hds[0, 1] == head[0][300], "head from 'get_ts' != head from 'get_data'" @requires_exe("mfusg", "gridgen") diff --git a/autotest/test_hydmodfile.py b/autotest/test_hydmodfile.py index 3bf29a2689..e3592ac8f2 100644 --- a/autotest/test_hydmodfile.py +++ b/autotest/test_hydmodfile.py @@ -55,13 +55,9 @@ def test_hydmodfile_create(function_tmpdir): def test_hydmodfile_load(function_tmpdir, hydmod_model_path): model = "test1tr.nam" - m = Modflow.load( - model, version="mf2005", model_ws=hydmod_model_path, verbose=True - ) + m = Modflow.load(model, version="mf2005", model_ws=hydmod_model_path, verbose=True) hydref = m.hyd - assert isinstance( - hydref, ModflowHyd - ), "Did not load hydmod package...test1tr.hyd" + assert isinstance(hydref, ModflowHyd), "Did not load hydmod package...test1tr.hyd" m.change_model_ws(function_tmpdir) m.hyd.write_file() @@ -101,9 +97,7 @@ def test_hydmodfile_read(hydmod_model_path): for label in labels: data = h.get_data(obsname=label) - assert data.shape == ( - len(times), - ), f"data shape is not ({len(times)},)" + assert data.shape == (len(times),), f"data shape is not ({len(times)},)" data = 
     assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
@@ -137,9 +131,7 @@ def test_mf6obsfile_read(mf6_obs_model_path):
         assert isinstance(h, Mf6Obs)

         ntimes = h.get_ntimes()
-        assert (
-            ntimes == 3
-        ), f"Not enough times in {txt} file...{os.path.basename(pth)}"
+        assert ntimes == 3, f"Not enough times in {txt} file...{os.path.basename(pth)}"

         times = h.get_times()
         assert len(times) == 3, "Not enough times in {} file...{}".format(
@@ -167,14 +159,10 @@ def test_mf6obsfile_read(mf6_obs_model_path):

         for label in labels:
             data = h.get_data(obsname=label)
-            assert data.shape == (
-                len(times),
-            ), f"data shape is not ({len(times)},)"
+            assert data.shape == (len(times),), f"data shape is not ({len(times)},)"

         data = h.get_data()
-        assert data.shape == (
-            len(times),
-        ), f"data shape is not ({len(times)},)"
+        assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
         assert (
             len(data.dtype.names) == nitems + 1
         ), f"data column length is not {nitems + 1}"
diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py
index 1632627117..fc63833f59 100644
--- a/autotest/test_lake_connections.py
+++ b/autotest/test_lake_connections.py
@@ -40,9 +40,7 @@ def export_ascii_grid(modelgrid, file_path, v, nodata=0.0):
         np.savetxt(f, v, fmt="%.4f")


-def get_lake_connection_data(
-    nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance
-):
+def get_lake_connection_data(nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance):
     # derived from original modflow6-examples function in ex-gwt-prudic2004t2
     lakeconnectiondata = []
     nlakecon = [0, 0]
@@ -262,9 +260,10 @@ def test_lake(function_tmpdir, example_data_path):
         pakdata_dict[0] == 54
     ), f"number of lake connections ({pakdata_dict[0]}) not equal to 54."

-    assert len(connectiondata) == 54, (
-        "number of lake connectiondata entries ({}) not equal "
-        "to 54.".format(len(connectiondata))
+    assert (
+        len(connectiondata) == 54
+    ), "number of lake connectiondata entries ({}) not equal to 54.".format(
+        len(connectiondata)
     )

     lak_pak_data = []
@@ -462,9 +461,10 @@ def test_embedded_lak_ex01(function_tmpdir, example_data_path):
         pakdata_dict[0] == 57
     ), f"number of lake connections ({pakdata_dict[0]}) not equal to 57."

-    assert len(connectiondata) == 57, (
-        "number of lake connectiondata entries ({}) not equal "
-        "to 57.".format(len(connectiondata))
+    assert (
+        len(connectiondata) == 57
+    ), "number of lake connectiondata entries ({}) not equal to 57.".format(
+        len(connectiondata)
     )

     lak_pak_data = []
@@ -517,10 +517,7 @@ def test_embedded_lak_prudic(example_data_path):
     bot0 = np.loadtxt(fname)
     botm = np.array(
         [bot0]
-        + [
-            np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
-            for k in range(1, nlay)
-        ]
+        + [np.ones(shape2d, dtype=float) * (bot0 - (delv * k)) for k in range(1, nlay)]
     )
     fname = data_ws / "prudic2004t2_idomain1.dat"
     idomain0 = np.loadtxt(fname, dtype=np.int32)
@@ -559,9 +556,7 @@ def test_embedded_lak_prudic(example_data_path):
     for idx, nconn in enumerate(lakconn):
         assert pakdata_dict[idx] == nconn, (
             "number of connections calculated by get_lak_connections ({}) "
-            "not equal to {} for lake {}.".format(
-                pakdata_dict[idx], nconn, idx + 1
-            )
+            "not equal to {} for lake {}.".format(pakdata_dict[idx], nconn, idx + 1)
         )

     # compare connectiondata
@@ -584,9 +579,7 @@ def test_embedded_lak_prudic(example_data_path):
             else:
                 match = np.allclose(cd[jdx], cdbase[jdx])
             if not match:
-                print(
-                    f"connection data do match for connection {idx} for lake {cd[0]}"
-                )
+                print(f"connection data do not match for connection {idx} for lake {cd[0]}")
                 break
         assert match, f"connection data do not match for connection {jdx}"
@@ -620,10 +613,7 @@ def test_embedded_lak_prudic_mixed(example_data_path):
     bot0 = np.loadtxt(fname)
     botm = np.array(
         [bot0]
-        + [
-            np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
-            for k in range(1, nlay)
-        ]
+        + [np.ones(shape2d, dtype=float) * (bot0 - (delv * k)) for k in range(1, nlay)]
     )
     fname = data_ws / "prudic2004t2_idomain1.dat"
     idomain0 = np.loadtxt(fname, dtype=np.int32)
@@ -664,8 +654,6 @@ def test_embedded_lak_prudic_mixed(example_data_path):
     for data in connectiondata:
         lakeno, bedleak = data[0], data[4]
         if lakeno == 0:
-            assert (
-                bedleak == "none"
-            ), f"bedleak for lake 0 is not 'none' ({bedleak})"
+            assert bedleak == "none", f"bedleak for lake 0 is not 'none' ({bedleak})"
         else:
             assert bedleak == 1.0, f"bedleak for lake 1 is not 1.0 ({bedleak})"
diff --git a/autotest/test_listbudget.py b/autotest/test_listbudget.py
index a9d7ce7929..ffaa73561f 100644
--- a/autotest/test_listbudget.py
+++ b/autotest/test_listbudget.py
@@ -68,9 +68,7 @@ def test_mflist_reducedpumping(example_data_path):
     """
     test reading reduced pumping data from list file
     """
-    pth = (
-        example_data_path / "mfusg_test" / "03B_conduit_unconfined" / "output"
-    )
+    pth = example_data_path / "mfusg_test" / "03B_conduit_unconfined" / "output"
     list_file = pth / "ex3B.lst"
     mflist = MfusgListBudget(list_file)
     assert isinstance(mflist.get_reduced_pumping(), np.recarray)
@@ -99,9 +97,7 @@ def test_mflist_reducedpumping_fail(example_data_path):
     """
     test failure for reading reduced pumping data from list file
     """
-    pth = (
-        example_data_path / "mfusg_test" / "03A_conduit_unconfined" / "output"
-    )
+    pth = example_data_path / "mfusg_test" / "03A_conduit_unconfined" / "output"
     list_file = pth / "ex3A.lst"
     # Catch before flopy to avoid masking file not found assert
     if not os.path.isfile(list_file):
diff --git a/autotest/test_mbase.py b/autotest/test_mbase.py
index 2e8688cb3a..2b494bdb31 100644
--- a/autotest/test_mbase.py
+++ b/autotest/test_mbase.py
@@ -66,17 +66,11 @@ def test_resolve_exe_by_rel_path(function_tmpdir, use_ext, forgive):
     assert which(actual)

     # check behavior if exe DNE
-    with (
-        pytest.warns(UserWarning)
-        if forgive
-        else pytest.raises(FileNotFoundError)
-    ):
+    with pytest.warns(UserWarning) if forgive else pytest.raises(FileNotFoundError):
         assert not resolve_exe("../bin/mf2005", forgive)


-def test_run_model_when_namefile_not_in_model_ws(
-    mf6_model_path, function_tmpdir
-):
+def test_run_model_when_namefile_not_in_model_ws(mf6_model_path, function_tmpdir):
     # copy input files to temp workspace
     ws = function_tmpdir / "ws"
     copytree(mf6_model_path, ws)
@@ -173,9 +167,7 @@ def test_run_model_exe_rel_path(mf6_model_path, function_tmpdir, use_ext):
         relpath_safe(Path(which("mf6") or "")),
     ],
 )
-def test_run_model_custom_print(
-    mf6_model_path, function_tmpdir, use_paths, exe
-):
+def test_run_model_custom_print(mf6_model_path, function_tmpdir, use_paths, exe):
     ws = function_tmpdir / "ws"
     copytree(mf6_model_path, ws)

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 6401fa8fb3..003d2fc22f 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -233,9 +233,7 @@ def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None):
         gwt,
         budget_filerecord=f"{gwtname}.cbc",
         concentration_filerecord=f"{gwtname}.ucn",
-        concentrationprintrecord=[
-            ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
-        ],
+        concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
         saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
         printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
     )
@@ -287,9 +285,7 @@ def test_load_and_run_sim_when_namefile_uses_abs_paths(
         for l in lines:
             pattern = f"{model_name}."
             if pattern in l:
-                l = l.replace(
-                    pattern, str(workspace.absolute()) + os.sep + pattern
-                )
+                l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern)
             f.write(l)

     # load, check and run simulation
@@ -301,9 +297,7 @@

 @requires_exe("mf6")
 @pytest.mark.parametrize("sep", ["win", "posix"])
-def test_load_sim_when_namefile_uses_rel_paths(
-    function_tmpdir, example_data_path, sep
-):
+def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep):
     # copy model input files to temp workspace
     model_name = "freyberg"
     workspace = function_tmpdir / "ws"
@@ -321,22 +315,14 @@ def test_load_sim_when_namefile_uses_rel_paths(
                 l = to_win_sep(
                     l.replace(
                         pattern,
-                        "../"
-                        + workspace.name
-                        + "/"
-                        + model_name
-                        + ".",
+                        "../" + workspace.name + "/" + model_name + ".",
                     )
                 )
             else:
                 l = to_posix_sep(
                     l.replace(
                         pattern,
-                        "../"
-                        + workspace.name
-                        + "/"
-                        + model_name
-                        + ".",
+                        "../" + workspace.name + "/" + model_name + ".",
                     )
                 )
             f.write(l)
@@ -376,22 +362,14 @@ def test_write_simulation_always_writes_posix_path_separators(
                 l = to_win_sep(
                     l.replace(
                         pattern,
-                        "../"
-                        + workspace.name
-                        + "/"
-                        + model_name
-                        + ".",
+                        "../" + workspace.name + "/" + model_name + ".",
                     )
                 )
             else:
                 l = to_posix_sep(
                     l.replace(
                         pattern,
-                        "../"
-                        + workspace.name
-                        + "/"
-                        + model_name
-                        + ".",
+                        "../" + workspace.name + "/" + model_name + ".",
                     )
                 )
             f.write(l)
@@ -516,9 +494,7 @@ def test_subdir(function_tmpdir):
     ), "Something wrong with model external paths"

     sim_r.set_all_data_internal()
-    sim_r.set_all_data_external(
-        external_data_folder=os.path.join("dat", "dat_l2")
-    )
+    sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
     sim_r.write_simulation()

     sim_r2 = MFSimulation.load(
@@ -823,9 +799,7 @@ def test_binary_read(function_tmpdir):
     nrow = 10
     ncol = 10

-    modelgrid = flopy.discretization.StructuredGrid(
-        nlay=nlay, nrow=nrow, ncol=ncol
-    )
+    modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol)

     arr = np.arange(nlay * nrow * ncol).astype(np.float64)
     data_shape = (nlay, nrow, ncol)
@@ -934,27 +908,17 @@ def test_props_and_write(function_tmpdir):
     # workspace as str
     sim = MFSimulation(sim_ws=str(function_tmpdir))
     assert isinstance(sim, MFSimulation)
-    assert (
-        sim.simulation_data.mfpath.get_sim_path()
-        == function_tmpdir
-        == sim.sim_path
-    )
+    assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path

     # workspace as Path
     sim = MFSimulation(sim_ws=function_tmpdir)
     assert isinstance(sim, MFSimulation)
-    assert (
-        sim.simulation_data.mfpath.get_sim_path()
-        == function_tmpdir
-        == sim.sim_path
-    )
+    assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path

     tdis = ModflowTdis(sim)
     assert isinstance(tdis, ModflowTdis)
-    gwfgwf = ModflowGwfgwf(
-        sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2"
-    )
+    gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2")
     assert isinstance(gwfgwf, ModflowGwfgwf)

     gwf = ModflowGwf(sim)
@@ -1088,9 +1052,7 @@ def test_set_sim_path(function_tmpdir, use_paths):
     sim.set_sim_path(new_ws if use_paths else str(new_ws))

     tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
-    tdis = mftdis.ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
+    tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)

     # create model instance
     model = mfgwf.ModflowGwf(
@@ -1128,9 +1090,7 @@ def test_create_and_run_model(function_tmpdir, use_paths):
         sim_ws=str(function_tmpdir),
     )
     tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
-    tdis = mftdis.ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
+    tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)

     # create model instance
     model = mfgwf.ModflowGwf(
@@ -1183,9 +1143,7 @@ def test_create_and_run_model(function_tmpdir, use_paths):
         ],
         filename=f"{model_name}.ic",
     )
-    npf_package = mfgwfnpf.ModflowGwfnpf(
-        model, save_flows=True, icelltype=1, k=100.0
-    )
+    npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0)

     sto_package = mfgwfsto.ModflowGwfsto(
         model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15
@@ -1255,9 +1213,7 @@ def test_get_set_data_record(function_tmpdir):
         sim_ws=str(function_tmpdir),
     )
     tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)]
-    tdis = mftdis.ModflowTdis(
-        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
-    )
+    tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc)

     # create model instance
     model = mfgwf.ModflowGwf(
@@ -1367,9 +1323,7 @@ def test_get_set_data_record(function_tmpdir):
     wel = model.get_package("wel")
     spd_record = wel.stress_period_data.get_record()
     well_sp_1 = spd_record[0]
-    assert (
-        well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt"
-    )
+    assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt"
     assert well_sp_1["binary"] is False
     assert well_sp_1["data"][0][0] == (0, 9, 2)
     assert well_sp_1["data"][0][1] == -50.0
@@ -1491,10 +1445,7 @@ def test_get_set_data_record(function_tmpdir):
     assert 0 in spd_record
     assert isinstance(spd_record[0], dict)
     assert "filename" in spd_record[0]
-    assert (
-        spd_record[0]["filename"]
-        == "testrecordmodel.rch_stress_period_data_1.txt"
-    )
+    assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt"
     assert "binary" in spd_record[0]
     assert spd_record[0]["binary"] is False
     assert "data" in spd_record[0]
@@ -1510,10 +1461,7 @@ def test_get_set_data_record(function_tmpdir):
     spd_record = rch_package.stress_period_data.get_record()
     assert isinstance(spd_record[0], dict)
     assert "filename" in spd_record[0]
-    assert (
-        spd_record[0]["filename"]
-        == "testrecordmodel.rch_stress_period_data_1.txt"
-    )
+    assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt"
     assert "binary" in spd_record[0]
     assert spd_record[0]["binary"] is False
     assert "data" in spd_record[0]
@@ -1688,9 +1636,7 @@ def test_sfr_connections(function_tmpdir, example_data_path):
         sim2.set_all_data_external()
         sim2.write_simulation()
         success, buff = sim2.run_simulation()
-        assert (
-            success
-        ), f"simulation {sim2.name} did not run after being reloaded"
+        assert success, f"simulation {sim2.name} did not run after being reloaded"

         # test sfr recarray data
         model2 = sim2.get_model()
@@ -1735,9 +1681,7 @@ def test_array(function_tmpdir):
     model_name = "test_array"
     out_dir = function_tmpdir
     tdis_name = f"{sim_name}.tdis"
-    sim = MFSimulation(
-        sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir
-    )
+    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
     tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
     tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
     ims_package = ModflowIms(
@@ -1756,9 +1700,7 @@ def test_array(function_tmpdir):
         preconditioner_drop_tolerance=0.01,
         number_orthogonalizations=2,
     )
-    model = ModflowGwf(
-        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")

     dis = ModflowGwfdis(
         model,
@@ -2109,9 +2051,7 @@ def test_multi_model(function_tmpdir):
     # gwf-gwf
     gwfgwf_data = []
     for col in range(0, ncol):
-        gwfgwf_data.append(
-            [(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0]
-        )
+        gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0])
     gwfgwf = ModflowGwfgwf(
         sim,
         exgtype="GWF6-GWF6",
@@ -2128,9 +2068,7 @@ def test_multi_model(function_tmpdir):
     wel_name_1 = wel_1.name[0]
     lak_name_2 = lak_2.name[0]
     package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)]
-    period_data = [
-        (gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)
-    ]
+    period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)]
     fname = "gwfgwf.input.mvr"
     gwfgwf.mvr.initialize(
         filename=fname,
@@ -2263,18 +2201,10 @@ def test_multi_model(function_tmpdir):
     assert fi_out[1][2] is None
     assert fi_out[2][2] == "MIXED"

-    spca1 = ModflowUtlspca(
-        gwt2, filename="gwt_model_1.rch1.spc", print_input=True
-    )
-    spca2 = ModflowUtlspca(
-        gwt2, filename="gwt_model_1.rch2.spc", print_input=False
-    )
-    spca3 = ModflowUtlspca(
-        gwt2, filename="gwt_model_1.rch3.spc", print_input=True
-    )
-    spca4 = ModflowUtlspca(
-        gwt2, filename="gwt_model_1.rch4.spc", print_input=True
-    )
+    spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True)
+    spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False)
+    spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True)
+    spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True)

     # test writing and loading spca packages
     sim2.write_simulation()
@@ -2311,8 +2241,7 @@ def test_multi_model(function_tmpdir):

     with pytest.raises(
         flopy.mf6.mfbase.FlopyException,
-        match='Extraneous kwargs "param_does_not_exist" '
-        "provided to MFPackage.",
+        match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.',
     ):
         # test kwargs error checking
         wel = ModflowGwfwel(
diff --git a/autotest/test_mfnwt.py b/autotest/test_mfnwt.py
index 792e54b025..17d3c33270 100644
--- a/autotest/test_mfnwt.py
+++ b/autotest/test_mfnwt.py
@@ -30,9 +27,7 @@ def analytical_water_table_solution(h1, h2, z, R, K, L, x):

 def fnwt_model_files(pattern):
     path = get_example_data_path() / "nwt_test"
-    return [
-        os.path.join(path, f) for f in os.listdir(path) if f.endswith(pattern)
-    ]
+    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(pattern)]


 @pytest.mark.parametrize("nwtfile", fnwt_model_files(".nwt"))
@@ -59,9 +57,7 @@ def test_nwt_pack_load(function_tmpdir, nwtfile):
     ml2 = Modflow(model_ws=function_tmpdir, version="mfnwt")
     nwt2 = ModflowNwt.load(fn, ml2)
     lst = [
-        a
-        for a in dir(nwt)
-        if not a.startswith("__") and not callable(getattr(nwt, a))
+        a for a in dir(nwt) if not a.startswith("__") and not callable(getattr(nwt, a))
     ]
     for l in lst:
         msg = (
@@ -91,9 +87,7 @@ def test_nwt_model_load(function_tmpdir, namfile):
         p = ml.get_package(pn)
         p2 = ml2.get_package(pn)
         lst = [
-            a
-            for a in dir(p)
-            if not a.startswith("__") and not callable(getattr(p, a))
+            a for a in dir(p) if not a.startswith("__") and not callable(getattr(p, a))
         ]
         for l in lst:
             msg = (
@@ -229,9 +223,7 @@ def test_mfnwt_run(function_tmpdir):
     ax.set_ylabel("Error, in m")

     ax = fig.add_subplot(1, 3, 3)
-    ax.plot(
-        x, 100.0 * (head[0, 0, :] - hac) / hac, linewidth=1, color="blue"
-    )
+    ax.plot(x, 100.0 * (head[0, 0, :] - hac) / hac, linewidth=1, color="blue")
     ax.set_xlabel("Horizontal distance, in m")
     ax.set_ylabel("Percent Error")

diff --git a/autotest/test_mfreadnam.py b/autotest/test_mfreadnam.py
index 8f9e7cb77c..286cffce57 100644
--- a/autotest/test_mfreadnam.py
+++ b/autotest/test_mfreadnam.py
@@ -72,9 +72,7 @@ def test_get_entries_from_namefile_mf2005(path):
             id="freyberg",
         ),
         pytest.param(
-            _example_data_path
-            / "freyberg_multilayer_transient"
-            / "freyberg.nam",
+            _example_data_path / "freyberg_multilayer_transient" / "freyberg.nam",
             {
                 "crs": "+proj=utm +zone=14 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
                 "rotation": 15.0,
diff --git a/autotest/test_mfsimlist.py b/autotest/test_mfsimlist.py
index 88f9a397b4..6cad2cac79 100644
--- a/autotest/test_mfsimlist.py
+++ b/autotest/test_mfsimlist.py
@@ -84,14 +84,12 @@ def test_mfsimlist_iterations(function_tmpdir):

     it_outer = mfsimlst.get_outer_iterations()
     assert it_outer == it_outer_answer, (
-        f"outer iterations is not equal to {it_outer_answer} "
-        + f"({it_outer})"
+        f"outer iterations is not equal to {it_outer_answer} " + f"({it_outer})"
     )

     it_total = mfsimlst.get_total_iterations()
     assert it_total == it_total_answer, (
-        f"total iterations is not equal to {it_total_answer} "
-        + f"({it_total})"
+        f"total iterations is not equal to {it_total_answer} " + f"({it_total})"
     )


@@ -117,8 +115,7 @@ def test_mfsimlist_memory(function_tmpdir):
     virtual_memory = mfsimlst.get_memory_usage(virtual=True)
     if not np.isnan(virtual_memory):
         assert virtual_memory == virtual_answer, (
-            f"virtual memory is not equal to {virtual_answer} "
-            + f"({virtual_memory})"
+            f"virtual memory is not equal to {virtual_answer} " + f"({virtual_memory})"
         )

     non_virtual_memory = mfsimlst.get_non_virtual_memory_usage()
diff --git a/autotest/test_mnw.py b/autotest/test_mnw.py
index 20ec79188c..9f99f69772 100644
--- a/autotest/test_mnw.py
+++ b/autotest/test_mnw.py
@@ -53,8 +53,7 @@ def test_load(function_tmpdir, mnw2_examples_path):
     ).max() < 0.01
     assert (
         np.abs(
-            mnw2_2.stress_period_data[0].qdes
-            - mnw2_3.stress_period_data[0].qdes
+            mnw2_2.stress_period_data[0].qdes - mnw2_3.stress_period_data[0].qdes
         ).min()
         < 0.01
     )
@@ -278,16 +277,12 @@ def test_make_package(function_tmpdir, dataframe):
     assert np.all(np.diff(well1["k"]) > 0)
     spd = m4.mnw2.stress_period_data[0]
     inds = spd.k, spd.i, spd.j
-    assert np.array_equal(
-        np.array(inds).transpose(), np.array([(2, 1, 1), (1, 3, 3)])
-    )
+    assert np.array_equal(np.array(inds).transpose(), np.array([(2, 1, 1), (1, 3, 3)]))
     m4.write_input()

     # make the package from the objects
     # reuse second per pumping for last stress period
-    mnw2fromobj = ModflowMnw2(
-        model=m4, mnwmax=2, mnw=mnw2_4.mnw, itmp=[2, 2, -1]
-    )
+    mnw2fromobj = ModflowMnw2(model=m4, mnwmax=2, mnw=mnw2_4.mnw, itmp=[2, 2, -1])
     # verify that the two input methods produce the same results
     assert np.array_equal(
         mnw2_4.stress_period_data[1], mnw2fromobj.stress_period_data[1]
@@ -296,9 +291,7 @@ def test_make_package(function_tmpdir, dataframe):
     m5 = Modflow("mnw2example", model_ws=ws)
     dis = ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m5)
     mnw2_5 = ModflowMnw2.load(mnw2_4.fn_path, m5)
-    assert np.array_equal(
-        mnw2_4.stress_period_data[1], mnw2_5.stress_period_data[1]
-    )
+    assert np.array_equal(mnw2_4.stress_period_data[1], mnw2_5.stress_period_data[1])


 @pytest.mark.parametrize("dataframe", [True, False])
@@ -350,9 +343,7 @@ def test_mnw2_create_file(function_tmpdir, dataframe):
             nnodes=nlayers[i],
             nper=len(stress_period_data.index),
             node_data=(
-                node_data.to_records(index=False)
-                if not dataframe
-                else node_data
+                node_data.to_records(index=False) if not dataframe else node_data
             ),
             stress_period_data=(
                 stress_period_data.to_records(index=False)
@@ -367,9 +358,7 @@ def test_mnw2_create_file(function_tmpdir, dataframe):
         model=mf,
         mnwmax=len(wells),
         mnw=wells,
-        itmp=list(
-            (np.ones(len(stress_period_data.index)) * len(wellids)).astype(int)
-        ),
+        itmp=list((np.ones(len(stress_period_data.index)) * len(wellids)).astype(int)),
     )

     if len(mnw2.node_data) != 6:
@@ -504,9 +493,7 @@ def test_blank_lines(function_tmpdir):

 def test_make_well():
     w1 = Mnw(wellid="Case-1")
-    assert (
-        w1.wellid == "case-1"
-    ), "did not correctly convert well id to lower case"
+    assert w1.wellid == "case-1", "did not correctly convert well id to lower case"


 def test_checks(mnw2_examples_path):
diff --git a/autotest/test_model_dot_plot.py b/autotest/test_model_dot_plot.py
index da8cac18d4..f4cfb377aa 100644
--- a/autotest/test_model_dot_plot.py
+++ b/autotest/test_model_dot_plot.py
@@ -14,9 +14,7 @@ def test_vertex_model_dot_plot(example_data_path):
     rcParams["figure.max_open_warning"] = 36

     # load up the vertex example problem
-    sim = MFSimulation.load(
-        sim_ws=example_data_path / "mf6" / "test003_gwftri_disv"
-    )
+    sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwftri_disv")
     disv_ml = sim.get_model("gwf_1")
     ax = disv_ml.plot()
     assert isinstance(ax, list)
@@ -44,9 +42,7 @@ def test_dataset_dot_plot(function_tmpdir, example_data_path):
     assert len(ax) == 2, f"number of hy axes ({len(ax)}) is not equal to 2"


-def test_dataset_dot_plot_nlay_ne_plottable(
-    function_tmpdir, example_data_path
-):
+def test_dataset_dot_plot_nlay_ne_plottable(function_tmpdir, example_data_path):
     import matplotlib.pyplot as plt

     loadpth = example_data_path / "mf2005_test"
@@ -66,9 +62,7 @@ def test_model_dot_plot_export(function_tmpdir, example_data_path):
     ml.plot(mflay=0, filename_base=fh, file_extension="png")

     files = [f for f in listdir(function_tmpdir) if f.endswith(".png")]
     if len(files) < 10:
-        raise AssertionError(
-            "ml.plot did not properly export all supported data types"
-        )
+        raise AssertionError("ml.plot did not properly export all supported data types")

     for f in files:
         t = f.split("_")
diff --git a/autotest/test_model_splitter.py b/autotest/test_model_splitter.py
index 8b2e1c5d9a..dc19a45e5b 100644
--- a/autotest/test_model_splitter.py
+++ b/autotest/test_model_splitter.py
@@ -236,9 +236,7 @@ def test_save_load_node_mapping(function_tmpdir):
     for k, v1 in original_node_map.items():
         v2 = saved_node_map[k]
         if not v1 == v2:
-            raise AssertionError(
-                "Node map read/write not returning proper values"
-            )
+            raise AssertionError("Node map read/write not returning proper values")

     array_dict = {}
     for model in range(nparts):
@@ -345,23 +343,17 @@ def test_control_records(function_tmpdir):
         raise AssertionError("Constants not being preserved for MFArray")

     if kls[1].data_storage_type.value != 3 or kls[1].binary:
-        raise AssertionError(
-            "External ascii files not being preserved for MFArray"
-        )
+        raise AssertionError("External ascii files not being preserved for MFArray")

     k33ls = ml1.npf.k33._data_storage.layer_storage.multi_dim_list
     if k33ls[1].data_storage_type.value != 3 or not k33ls[1].binary:
-        raise AssertionError(
-            "Binary file input not being preserved for MFArray"
-        )
+        raise AssertionError("Binary file input not being preserved for MFArray")

     spd_ls1 = ml1.wel.stress_period_data.get_record(1)
     spd_ls2 = ml1.wel.stress_period_data.get_record(2)

     if spd_ls1["filename"] is None or spd_ls1["binary"]:
-        raise AssertionError(
-            "External ascii files not being preserved for MFList"
-        )
+        raise AssertionError("External ascii files not being preserved for MFList")

     if spd_ls2["filename"] is None or not spd_ls2["binary"]:
         raise AssertionError(
@@ -573,8 +565,7 @@ def test_transient_array(function_tmpdir):
         ):
             d[key] = g.sto.steady_state.get_data(key)
         assert d == steady, (
-            "storage steady_state dictionary "
-            + f"does not match for model '{name}'"
+            "storage steady_state dictionary " + f"does not match for model '{name}'"
         )
     d = {}
     for key in (1,):
@@ -682,11 +673,7 @@ def test_idomain_none(function_tmpdir):
     head_dict = {}
     for idx, modelname in enumerate(new_sim.model_names):
         mnum = int(modelname.split("_")[-1])
-        h = (
-            new_sim.get_model(modelname)
-            .output.head()
-            .get_data(kstpkper=kstpkper)
-        )
+        h = new_sim.get_model(modelname).output.head().get_data(kstpkper=kstpkper)
         head_dict[mnum] = h

     new_head = ms.reconstruct_array(head_dict)
@@ -830,9 +817,7 @@ def test_unstructured_complex_disu(function_tmpdir):
     chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=spd)

     spd = {0: [("HEAD", "LAST")]}
-    oc = flopy.mf6.ModflowGwfoc(
-        gwf, head_filerecord=f"{mname}.hds", saverecord=spd
-    )
+    oc = flopy.mf6.ModflowGwfoc(gwf, head_filerecord=f"{mname}.hds", saverecord=spd)

     sim.write_simulation()
     sim.run_simulation()
@@ -979,9 +964,7 @@ def string2geom(geostring, conversion=None):
         for c in range(ncol):
             if idomain[0, r, c] == 1:
                 conductance = leakance * area
-                discharge_data.append(
-                    (0, r, c, top[r, c] - 0.5, conductance, 1.0)
-                )
+                discharge_data.append((0, r, c, top[r, c] - 0.5, conductance, 1.0))

     topc = np.zeros((nlay, nrow, ncol), dtype=float)
     botm = np.zeros((nlay, nrow, ncol), dtype=float)
@@ -1198,14 +1181,10 @@ def build_gwt_model(sim, gwtname, rch_package):
     )

     # initial conditions
-    ic = flopy.mf6.ModflowGwtic(
-        gwt, strt=conc_start, filename=f"{gwtname}.ic"
-    )
+    ic = flopy.mf6.ModflowGwtic(gwt, strt=conc_start, filename=f"{gwtname}.ic")

     # advection
-    adv = flopy.mf6.ModflowGwtadv(
-        gwt, scheme="tvd", filename=f"{gwtname}.adv"
-    )
+    adv = flopy.mf6.ModflowGwtadv(gwt, scheme="tvd", filename=f"{gwtname}.adv")

     # dispersion
     dsp = flopy.mf6.ModflowGwtdsp(
@@ -1219,9 +1198,7 @@ def build_gwt_model(sim, gwtname, rch_package):
     )

     # mass storage and transfer
-    mst = flopy.mf6.ModflowGwtmst(
-        gwt, porosity=porosity, filename=f"{gwtname}.mst"
-    )
+    mst = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity, filename=f"{gwtname}.mst")

     # sources
     sourcerecarray = [
@@ -1308,15 +1285,11 @@ def build_gwt_model(sim, gwtname, rch_package):

         X_split = mfs.reconstruct_array(array_dict)

-        err_msg = (
-            f"Outputs from {name} and split model " f"are not within tolerance"
-        )
+        err_msg = f"Outputs from {name} and split model are not within tolerance"
         X_split[idomain == 0] = np.nan
         X[idomain == 0] = np.nan

         if name == "gwf":
-            np.testing.assert_allclose(
-                X, X_split, equal_nan=True, err_msg=err_msg
-            )
+            np.testing.assert_allclose(X, X_split, equal_nan=True, err_msg=err_msg)
         else:
             diff = np.abs(X_split - X)
             diff = np.nansum(diff)
diff --git a/autotest/test_modflow.py b/autotest/test_modflow.py
index 5cb7b161df..cadf978f3c 100644
--- a/autotest/test_modflow.py
+++ b/autotest/test_modflow.py
@@ -47,10 +47,7 @@ def parameters_model_path(example_data_path):
 @pytest.mark.parametrize(
     "namfile",
     [Path("freyberg") / "freyberg.nam"]
-    + [
-        Path("parameters") / f"{nf}.nam"
-        for nf in ["Oahu_01", "twrip", "twrip_upw"]
-    ],
+    + [Path("parameters") / f"{nf}.nam" for nf in ["Oahu_01", "twrip", "twrip_upw"]],
 )
 def test_modflow_load(namfile, example_data_path):
     mpath = Path(example_data_path / namfile).parent
@@ -95,9 +92,7 @@ def test_modflow_load(namfile, example_data_path):
             id="freyberg",
         ),
         pytest.param(
-            _example_data_path
-            / "freyberg_multilayer_transient"
-            / "freyberg.nam",
+            _example_data_path / "freyberg_multilayer_transient" / "freyberg.nam",
             {
                 "proj4": "+proj=utm +zone=14 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
                 "angrot": 15.0,
@@ -144,9 +139,7 @@ def test_modflow_load_when_nam_dne():


 def test_mbase_modelgrid(function_tmpdir):
-    ml = Modflow(
-        modelname="test", xll=500.0, rotation=12.5, start_datetime="1/1/2016"
-    )
+    ml = Modflow(modelname="test", xll=500.0, rotation=12.5, start_datetime="1/1/2016")
     try:
         print(ml.modelgrid.xcentergrid)
     except:
@@ -216,12 +209,8 @@ def test_mt_modelgrid(function_tmpdir):
         verbose=True,
     )

-    assert (
-        swt.modelgrid.xoffset == mt.modelgrid.xoffset == ml.modelgrid.xoffset
-    )
-    assert (
-        swt.modelgrid.yoffset == mt.modelgrid.yoffset == ml.modelgrid.yoffset
-    )
+    assert swt.modelgrid.xoffset == mt.modelgrid.xoffset == ml.modelgrid.xoffset
+    assert swt.modelgrid.yoffset == mt.modelgrid.yoffset == ml.modelgrid.yoffset
     assert mt.modelgrid.crs == ml.modelgrid.crs == swt.modelgrid.crs
     assert mt.modelgrid.angrot == ml.modelgrid.angrot == swt.modelgrid.angrot
     assert np.array_equal(mt.modelgrid.idomain, ml.modelgrid.idomain)
@@ -250,12 +239,8 @@ def test_mt_modelgrid(function_tmpdir):
         verbose=True,
     )

-    assert (
-        ml.modelgrid.xoffset == mt.modelgrid.xoffset == swt.modelgrid.xoffset
-    )
-    assert (
-        mt.modelgrid.yoffset == ml.modelgrid.yoffset == swt.modelgrid.yoffset
-    )
+    assert ml.modelgrid.xoffset == mt.modelgrid.xoffset == swt.modelgrid.xoffset
+    assert mt.modelgrid.yoffset == ml.modelgrid.yoffset == swt.modelgrid.yoffset
     assert mt.modelgrid.crs == ml.modelgrid.crs == swt.modelgrid.crs
     assert mt.modelgrid.angrot == ml.modelgrid.angrot == swt.modelgrid.angrot
     assert np.array_equal(mt.modelgrid.idomain, ml.modelgrid.idomain)
@@ -272,14 +257,11 @@ def test_exe_selection(example_data_path, function_tmpdir):
     assert Path(Modflow().exe_name).stem == exe_name
     assert Path(Modflow(exe_name=None).exe_name).stem == exe_name
     assert (
-        Path(Modflow.load(namfile_path, model_ws=model_path).exe_name).stem
-        == exe_name
+        Path(Modflow.load(namfile_path, model_ws=model_path).exe_name).stem == exe_name
     )
     assert (
         Path(
-            Modflow.load(
-                namfile_path, exe_name=None, model_ws=model_path
-            ).exe_name
+            Modflow.load(namfile_path, exe_name=None, model_ws=model_path).exe_name
         ).stem
         == exe_name
     )
@@ -290,9 +272,7 @@ def test_exe_selection(example_data_path, function_tmpdir):
     assert Path(Modflow(exe_name=exe_name).exe_name).stem == exe_name
     assert (
         Path(
-            Modflow.load(
-                namfile_path, exe_name=exe_name, model_ws=model_path
-            ).exe_name
+            Modflow.load(namfile_path, exe_name=exe_name, model_ws=model_path).exe_name
         ).stem
         == exe_name
     )
@@ -420,13 +400,9 @@ def test_load_twri_grid(example_data_path):
     name = "twri.nam"
     ml = Modflow.load(name, model_ws=mpath, check=False)
     mg = ml.modelgrid
-    assert isinstance(
-        mg, StructuredGrid
-    ), "modelgrid is not an StructuredGrid instance"
+    assert isinstance(mg, StructuredGrid), "modelgrid is not a StructuredGrid instance"
     shape = (3, 15, 15)
-    assert (
-        mg.shape == shape
-    ), f"modelgrid shape {mg.shape} not equal to {shape}"
+    assert mg.shape == shape, f"modelgrid shape {mg.shape} not equal to {shape}"
     thickness = mg.cell_thickness
     shape = (5, 15, 15)
     assert (
@@ -484,9 +460,7 @@ def test_mg(function_tmpdir):

     # test that transform for arbitrary coordinates
     # is working in same as transform for model grid
-    mg2 = StructuredGrid(
-        delc=ms.dis.delc.array, delr=ms.dis.delr.array, lenuni=2
-    )
+    mg2 = StructuredGrid(delc=ms.dis.delc.array, delr=ms.dis.delr.array, lenuni=2)
     x = mg2.xcellcenters[0]
     y = mg2.ycellcenters[0]
     mg2.set_coord_info(xoff=xll, yoff=yll, angrot=angrot)
@@ -522,9 +496,7 @@ def test_dynamic_xll_yll():
     xll, yll = 286.80, 29.03
     # test scaling of length units
     ms2 = Modflow()
-    dis = ModflowDis(
-        ms2, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr, delc=delc
-    )
+    dis = ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr, delc=delc)

     ms2.modelgrid.set_coord_info(xoff=xll, yoff=yll, angrot=30.0)
     xll1, yll1 = ms2.modelgrid.xoffset, ms2.modelgrid.yoffset
@@ -976,10 +948,7 @@ def test_properties_check(function_tmpdir):
     )
     ind3_errors = chk.summary_array[ind3]["desc"]

-    assert (
-        "zero or negative horizontal hydraulic conductivity values"
-        in ind1_errors
-    )
+    assert "zero or negative horizontal hydraulic conductivity values" in ind1_errors
     assert (
         "horizontal hydraulic conductivity values below checker threshold of 1e-11"
         in ind1_errors
@@ -993,10 +962,7 @@ def test_properties_check(function_tmpdir):
         "horizontal hydraulic conductivity values above checker threshold of 100000.0"
         in ind2_errors
     )
-    assert (
-        "zero or negative vertical hydraulic conductivity values"
-        in ind2_errors
-    )
+    assert "zero or negative vertical hydraulic conductivity values" in ind2_errors
     assert (
         "vertical hydraulic conductivity values above checker threshold of 100000.0"
         in ind3_errors
     )
@@ -1042,9 +1008,7 @@ def test_rchload(function_tmpdir):
     m1 = Modflow("rchload1", model_ws=ws)
     dis1 = ModflowDis(m1, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
     a = np.random.random((nrow, ncol))
-    rech1 = Util2d(
-        m1, (nrow, ncol), np.float32, a, "rech", cnstnt=1.0, how="openclose"
-    )
+    rech1 = Util2d(m1, (nrow, ncol), np.float32, a, "rech", cnstnt=1.0, how="openclose")
     rch1 = ModflowRch(m1, rech={0: rech1})
     m1.write_input()

@@ -1059,9 +1023,7 @@ def test_rchload(function_tmpdir):
     m2 = Modflow("rchload2", model_ws=ws)
     dis2 = ModflowDis(m2, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper)
     a = np.random.random((nrow, ncol))
-    rech2 = Util2d(
-        m2, (nrow, ncol), np.float32, a, "rech", cnstnt=2.0, how="openclose"
-    )
+    rech2 = Util2d(m2, (nrow, ncol), np.float32, a, "rech", cnstnt=2.0, how="openclose")
     rch2 = ModflowRch(m2, rech={0: rech2})
     m2.write_input()

@@ -1349,9 +1311,7 @@ def get_perftest_model(ws, name):
         botm=list(range(nlay)),
     )

-    rch = ModflowRch(
-        m, rech={k: 0.001 - np.cos(k) * 0.001 for k in range(nper)}
-    )
+    rch = ModflowRch(m, rech={k: 0.001 - np.cos(k) * 0.001 for k in range(nper)})

     ra = ModflowWel.get_empty(size**2)
     well_spd = {}
@@ -1359,10 +1319,7 @@ def get_perftest_model(ws, name):
         ra_per = ra.copy()
         ra_per["k"] = 1
         ra_per["i"] = (
-            (np.ones((size, size)) * np.arange(size))
-            .transpose()
-            .ravel()
-            .astype(int)
+            (np.ones((size, size)) * np.arange(size)).transpose().ravel().astype(int)
         )
         ra_per["j"] = list(range(size)) * size
         well_spd[kper] = ra
@@ -1398,7 +1355,5 @@ def test_model_load_time(function_tmpdir, benchmark):
     model = get_perftest_model(ws=function_tmpdir, name=name)
     model.write_input()
     benchmark(
-        lambda: Modflow.load(
-            f"{name}.nam", model_ws=function_tmpdir, check=False
-        )
+        lambda: Modflow.load(f"{name}.nam", model_ws=function_tmpdir, check=False)
     )
diff --git a/autotest/test_modflowoc.py b/autotest/test_modflowoc.py
index 9656f49dd8..4ecb5d32fc 100644
--- a/autotest/test_modflowoc.py
+++ b/autotest/test_modflowoc.py
@@ -20,6 +20,4 @@ def test_modflowoc_load_fails_when_wrong_nlay_nper_nstp(
     mpath = example_data_path / "mf2005_test"
     # noinspection PyTypeChecker
     with pytest.raises((ValueError, OSError)):
-        ModflowOc.load(
-            mpath / "fhb.oc", model, nper=nper, nstp=nstp, nlay=nlay
-        )
+        ModflowOc.load(mpath / "fhb.oc", model, nper=nper, nstp=nstp, nlay=nlay)
diff --git a/autotest/test_modpathfile.py b/autotest/test_modpathfile.py
index e2e46bae1c..2bedeeb3f1 100644
--- a/autotest/test_modpathfile.py
+++ b/autotest/test_modpathfile.py
@@ -63,9 +63,7 @@ def get_nodes(locs):

     # Create the Flopy temporal discretization object
     pd = (perlen, nstp, tsmult)
-    tdis = ModflowTdis(
-        sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd]
-    )
+    tdis = ModflowTdis(sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd])

     # Create the Flopy groundwater flow (gwf) model object
     model_nam_file = f"{name}.nam"
@@ -238,9 +236,7 @@ def mp7_large(module_tmpdir):


 @requires_exe("mf6")
-def test_pathline_file_sorts_in_ctor(
-    function_tmpdir, module_tmpdir, mp7_small
-):
+def test_pathline_file_sorts_in_ctor(function_tmpdir, module_tmpdir, mp7_small):
     sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_small
     ws = function_tmpdir / "ws"

@@ -252,8 +248,7 @@ def test_pathline_file_sorts_in_ctor(

     pathline_file = PathlineFile(forward_path)
     assert np.all(
-        pathline_file._data[:-1]["particleid"]
-        <= pathline_file._data[1:]["particleid"]
+        pathline_file._data[:-1]["particleid"] <= pathline_file._data[1:]["particleid"]
     )


@@ -339,9 +334,7 @@ def test_write_shapefile(function_tmpdir, mp7_small, longfieldname):
     fieldname = "newfield" + ("longname" if longfieldname else "")
     fieldval = "x"
     pathlines = [
-        rfn.append_fields(
-            pl, fieldname, list(repeat(fieldval, len(pl))), dtypes="|S1"
-        )
+        rfn.append_fields(pl, fieldname, list(repeat(fieldval, len(pl))), dtypes="|S1")
         for pl in pathlines
     ]

diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py
index ec1f224937..e5c44300a4 100644
--- a/autotest/test_mp6.py
+++ b/autotest/test_mp6.py
@@ -57,13 +57,9 @@ def test_mpsim(function_tmpdir, mp6_test_path):
         budget_file=f"{m.name}.bud",
     )

-    mpb = Modpath6Bas(
-        mp, hdry=m.lpf.hdry, laytyp=m.lpf.laytyp, ibound=1, prsity=0.1
-    )
+    mpb = Modpath6Bas(mp, hdry=m.lpf.hdry, laytyp=m.lpf.laytyp, ibound=1, prsity=0.1)

-    sim = mp.create_mpsim(
-        trackdir="forward", simtype="endpoint", packages="RCH"
-    )
+    sim = mp.create_mpsim(trackdir="forward", simtype="endpoint", packages="RCH")
     mp.write_input()

     # replace the well with an mnw
@@ -105,9 +101,7 @@ def test_mpsim(function_tmpdir, mp6_test_path):
     )
     # test creation of modpath simulation file for MNW2
     # (not a very robust test)
-    sim = mp.create_mpsim(
-        trackdir="backward", simtype="pathline", packages="MNW2"
-    )
+    sim = mp.create_mpsim(trackdir="backward", simtype="pathline", packages="MNW2")
     mp.write_input()

     # test StartingLocationsFile._write_wo_pandas
@@ -115,9 +109,7 @@ def test_mpsim(function_tmpdir, mp6_test_path):
         sim = Modpath6Sim(model=mp)
         # starting locations file
         stl = StartingLocationsFile(model=mp, use_pandas=use_pandas)
-        stldata = StartingLocationsFile.get_empty_starting_locations_data(
-            npt=2
-        )
+        stldata = StartingLocationsFile.get_empty_starting_locations_data(npt=2)
         stldata["label"] = ["p1", "p2"]
         stldata[1]["i0"] = 5
         stldata[1]["j0"] = 6
@@ -231,9 +223,7 @@ def test_get_destination_data(function_tmpdir, mp6_test_path):
     xorig, yorig = m.modelgrid.get_coords(well_epd.x0[0], well_epd.y0[0])
     assert p3.x - xorig + p3.y - yorig < 1e-4
     xorig, yorig = mg1.xcellcenters[3, 4], mg1.ycellcenters[3, 4]
-    assert (
-        np.abs(p3.x - xorig + p3.y - yorig) < 1e-4
-    )  # this also checks for 1-based
+    assert np.abs(p3.x - xorig + p3.y - yorig) < 1e-4  # this also checks for 1-based

     # test that particle attribute information is consistent with pathline file
     ra = shp2recarray(function_tmpdir / "pathlines.shp")
@@ -260,12 +250,7 @@ def test_get_destination_data(function_tmpdir, mp6_test_path):
     test1 = mg1.xcellcenters[3, 4]
     test2 = mg1.ycellcenters[3, 4]
     assert (
-        np.abs(
-            p3_2.x[0]
-            - mg1.xcellcenters[3, 4]
-            + p3_2.y[0]
-            - mg1.ycellcenters[3, 4]
-        )
+        np.abs(p3_2.x[0] - mg1.xcellcenters[3, 4] + p3_2.y[0] - mg1.ycellcenters[3, 4])
         < 1e-4
     )

@@ -274,12 +259,7 @@ def test_get_destination_data(function_tmpdir, mp6_test_path):
     p3_2 = ra.geometry[ra.particleid == 4][0]
     mg.set_coord_info(xoff=mg.xoffset, yoff=mg.yoffset, angrot=30.0)
     assert (
-        np.abs(
-            p3_2.x[0]
-            - mg.xcellcenters[3, 4]
-            + p3_2.y[0]
-            - mg.ycellcenters[3, 4]
-        )
+        np.abs(p3_2.x[0] - mg.xcellcenters[3, 4] + p3_2.y[0] - mg.ycellcenters[3, 4])
         < 1e-4
     )

@@ -351,18 +331,14 @@ def test_modpath(function_tmpdir, example_data_path):
         prsity=0.2,
         prsityCB=0.2,
     )
-    sim = mp.create_mpsim(
-        trackdir="forward", simtype="endpoint", packages="RCH"
-    )
+    sim = mp.create_mpsim(trackdir="forward", simtype="endpoint", packages="RCH")

     # write forward particle track files
     mp.write_input()

     if success:
         success, buff = mp.run_model(silent=False)
-        assert (
-            success
-        ), "forward modpath model run did not terminate successfully"
+        assert success, "forward modpath model run did not terminate successfully"

     mpnam = "freybergmpp"
     mpp = Modpath6(
@@ -379,18 +355,14 @@ def test_modpath(function_tmpdir, example_data_path):
         prsity=0.2,
         prsityCB=0.2,
     )
-    sim = mpp.create_mpsim(
-        trackdir="backward", simtype="pathline", packages="WEL"
-    )
+    sim = mpp.create_mpsim(trackdir="backward", simtype="pathline", packages="WEL")

     # write backward particle track files
     mpp.write_input()

     if success:
         success, buff = mpp.run_model(silent=False)
-        assert (
-            success
-        ), "backward modpath model run did not terminate successfully"
+        assert success, "backward modpath model run did not terminate successfully"

     # load modpath output files
     if success:
@@ -411,9 +383,7 @@ def test_modpath(function_tmpdir, example_data_path):
         except:
             assert False, "could not load pathline file"
         plines = pthobj.get_alldata()
-        assert (
-            len(plines) == 576
-        ), "there are not 576 particle pathlines in file"
+        assert len(plines) == 576, "there are not 576 particle pathlines in file"

     # load the modflow files for model map
     mfnam = "freyberg.nam"
@@ -473,9 +443,7 @@ def test_modpath(function_tmpdir, example_data_path):

 def test_mp6_timeseries_load(example_data_path):
     pth = example_data_path / "mp5"
-    files = [
-        pth / name for name in sorted(os.listdir(pth)) if ".timeseries" in name
-    ]
+    files = [pth / name for name in sorted(os.listdir(pth)) if ".timeseries" in name]
     for file in files:
         print(file)
         eval_timeseries(file)
@@ -483,9 +451,9 @@ def test_mp6_timeseries_load(example_data_path):

 def eval_timeseries(file):
     ts = TimeseriesFile(file)
-    assert isinstance(ts, TimeseriesFile), (
-        f"{os.path.basename(file)} " "is not an instance of TimeseriesFile"
-    )
+    assert isinstance(
+        ts, TimeseriesFile
+    ), f"{os.path.basename(file)} is not an instance of TimeseriesFile"

     # get the all of the data
     tsd = ts.get_alldata()
@@ -536,9 +504,7 @@ def get_mf2005_model(name, ws, alt=False):
     ncol = 4
     nlay = 2
     nper = 1
-    l1_ibound = np.array(
-        [[[-1, -1, -1, -1], [-1, 1, 1, -1], [-1, -1, -1, -1]]]
-    )
+    l1_ibound = np.array([[[-1, -1, -1, -1], [-1, 1, 1, -1], [-1, -1, -1, -1]]])
     l2_ibound = np.ones((1, nrow, ncol))
     l2_ibound_alt = np.ones((1, nrow, ncol))
     l2_ibound_alt[0, 0, 0] = 0
@@ -552,9 +518,7 @@ def get_mf2005_model(name, ws, alt=False):
         l1_ibound=l1_ibound,
         l2_ibound=l2_ibound,
         l2_ibound_alt=l2_ibound_alt,
-        ibound=np.concatenate(
-            (l1_ibound, l2_ibound_alt if alt else l2_ibound), axis=0
-        ),
+        ibound=np.concatenate((l1_ibound, l2_ibound_alt if alt else l2_ibound), axis=0),
         laytype=[0, 0 if alt else 1],
         hnoflow=-888,
         hdry=-777,
@@ -623,9 +587,7 @@ def get_mf2005_model(name, ws, alt=False):
         stress_period_data={0: [[1, 1, 1, -5.0]]},
     )

-    flopy.modflow.ModflowPcg(
-        m, hclose=0.001, rclose=0.001, mxiter=150, iter1=30
-    )
+    flopy.modflow.ModflowPcg(m, hclose=0.001, rclose=0.001, mxiter=150, iter1=30)

     ocspd = {}
     for p in range(nper):
@@ -906,9 +868,7 @@ def get_mp6_model(m, ctx, name, ws, use_pandas):
     )

     sim = flopy.modpath.Modpath6Sim(model=mp)
-    stl = flopy.modpath.mp6sim.StartingLocationsFile(
-        model=mp, use_pandas=use_pandas
-    )
+    stl = flopy.modpath.mp6sim.StartingLocationsFile(model=mp, use_pandas=use_pandas)
     stldata = stl.get_empty_starting_locations_data(npt=2)
     stldata["label"] = ["p1", "p2"]
     stldata[1]["k0"] = 0
@@ -930,9 +890,7 @@ def test_mp_pandas(function_tmpdir):
     assert success

     mp_pandas = get_mp6_model(ml, ctx, name, function_tmpdir, use_pandas=True)
-    mp_no_pandas = get_mp6_model(
-        ml, ctx, name, function_tmpdir, use_pandas=False
-    )
+    mp_no_pandas = get_mp6_model(ml, ctx, name, function_tmpdir, use_pandas=False)

     mp_no_pandas.write_input()
     success, buff = mp_no_pandas.run_model()
diff --git a/autotest/test_mp7.py b/autotest/test_mp7.py
index d3d1b324f1..13b3e688be 100644
--- a/autotest/test_mp7.py
+++ b/autotest/test_mp7.py
@@ -75,9 +75,7 @@ def ex01b_mf6_model(function_tmpdir):

     # Create the Flopy temporal discretization object
     pd = (perlen, nstp, tsmult)
-    tdis = ModflowTdis(
-        sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd]
-    )
+    tdis = ModflowTdis(sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd])

     # Create the Flopy groundwater flow (gwf) model object
     model_nam_file = f"{ex01b_mf6_model_name}.nam"
@@ -292,9 +290,7 @@ def test_faceparticles_is1(ex01b_mf6_model):
         locs, structured=False, drape=0, localx=localx, localy=localy, localz=1
     )
     fpth = f"{mpnam}.sloc"
-    pg = ParticleGroup(
-        particlegroupname="T1NODEPG", particledata=p, filename=fpth
-    )
+    pg = ParticleGroup(particlegroupname="T1NODEPG", particledata=p, filename=fpth)
     build_modpath(
         function_tmpdir,
         mpnam,
@@ -459,9 +455,7 @@ def test_cellparticles_is1(ex01b_mf6_model):
         locs, structured=False, drape=0, localx=0.5, localy=0.5, localz=0.5
     )
     fpth = f"{mpnam}.sloc"
-    pg = ParticleGroup(
-        particlegroupname="T1NODEPG", particledata=p, filename=fpth
-    )
+    pg = ParticleGroup(particlegroupname="T1NODEPG", particledata=p, filename=fpth)
     build_modpath(
         function_tmpdir,
         mpnam,
@@ -496,13 +490,9 @@ def test_cellparticleskij_is1(ex01b_mf6_model):
     for i in range(grid.nrow):
         for j in range(grid.ncol):
             locs.append((k, i, j))
-    p = ParticleData(
-        locs, structured=True, drape=0, localx=0.5, localy=0.5, localz=0.5
-    )
+    p = ParticleData(locs, structured=True, drape=0, localx=0.5, localy=0.5, localz=0.5)
     fpth = f"{mpnam}.sloc"
-    pg = ParticleGroup(
-        particlegroupname="T1KIJPG", particledata=p, filename=fpth
-    )
+    pg = ParticleGroup(particlegroupname="T1KIJPG", particledata=p, filename=fpth)
     build_modpath(
         function_tmpdir,
         mpnam,
@@ -576,9 +566,7 @@ def test_cellnode_is3a(ex01b_mf6_model):
         rowcelldivisions=1,
         layercelldivisions=1,
     )
-    p = NodeParticleData(
-        subdivisiondata=[sd, sd, sd], nodes=[locsa, locsb, locsc]
-    )
+    p = NodeParticleData(subdivisiondata=[sd, sd, sd], nodes=[locsa, locsb, locsc])
     fpth = f"{mpnam}.sloc"
     pg = ParticleGroupNodeTemplate(
         particlegroupname="T3ACELLPG", particledata=p, filename=fpth
@@ -655,9 +643,7 @@ def ex01_mf6_model(function_tmpdir):

     # Create the Flopy temporal discretization object
     pd = (perlen, nstp, tsmult)
-    tdis = ModflowTdis(
-        sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd]
-    )
+    tdis = ModflowTdis(sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd])

     # Create the Flopy groundwater flow (gwf) model object
     model_nam_file = f"{ex01_mf6_model_name}.nam"
@@ -856,9 +842,7 @@ def test_mp7sim_replacement(function_tmpdir):


 @requires_exe("mf6", "mp7")
-@pytest.mark.parametrize(
-    "porosity_type", ("constant", "list", "array_1d", "array")
-)
+@pytest.mark.parametrize("porosity_type", ("constant", "list", "array_1d", "array"))
 @pytest.mark.slow
 def test_mp7bas_porosity(ex01_mf6_model, porosity_type):
     sim, function_tmpdir = ex01_mf6_model
diff --git a/autotest/test_mp7_cases.py b/autotest/test_mp7_cases.py
index 27a8f7ec34..cbea11724e 100644
--- a/autotest/test_mp7_cases.py
+++ b/autotest/test_mp7_cases.py
@@ -83,9 +83,7 @@ def mf6(function_tmpdir):
     nm = "ex01_mf6"

     # Create the Flopy simulation object
-    sim = MFSimulation(
-        sim_name=nm, exe_name="mf6", version="mf6", sim_ws=ws
-    )
+    sim = MFSimulation(sim_name=nm, exe_name="mf6", version="mf6", sim_ws=ws)

     # Create the Flopy temporal discretization object
     pd = (Mp7Cases.perlen, Mp7Cases.nstp, Mp7Cases.tsmult)
@@ -265,9 +263,7 @@ def mf2005(function_tmpdir):
     # output control
     ModflowOc(
         m,
-        stress_period_data={
-            (0, 0): ["save head", "save budget", "print head"]
-        },
+        stress_period_data={(0, 0): ["save head", "save budget", "print head"]},
     )

     ModflowPcg(m, hclose=1e-6, rclose=1e-3, iter1=100, mxiter=50)
diff --git a/autotest/test_mt3d.py b/autotest/test_mt3d.py
index 313809cc25..b2e3b144cc 100644
--- a/autotest/test_mt3d.py
+++ b/autotest/test_mt3d.py
@@ -290,9 +290,7 @@ def test_mf2000_zeroth(function_tmpdir, mf2kmt3d_model_path):
 @pytest.mark.slow
 @flaky(max_runs=3)
 @requires_exe("mfnwt", "mt3dms")
-@excludes_platform(
-    "Windows", ci_only=True
-)  # TODO remove once fixed in MT3D-USGS
+@excludes_platform("Windows", ci_only=True)  # TODO remove once fixed in MT3D-USGS
 def test_mfnwt_CrnkNic(function_tmpdir, mfnwtmt3d_model_path):
     pth = mfnwtmt3d_model_path / "sft_crnkNic"
     namefile = "CrnkNic.nam"
@@ -489,9 +487,7 @@ def test_mt3d_create_woutmfmodel(function_tmpdir):
             break
     assert ipos >= 0, f"'{wrn_msg}' warning message not issued"
-    assert (
-        w[ipos].category == UserWarning
-    ), f"Warning category: {w[0].category}"
+    assert w[ipos].category == UserWarning, f"Warning category: {w[0].category}"

     gcg = Mt3dRct(mt)
     rct = Mt3dGcg(mt)
@@ -501,27 +497,13 @@ def test_mt3d_create_woutmfmodel(function_tmpdir):
     mt.write_input()

     # confirm that MT3D files exist
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{btn.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{adv.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{dsp.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{ssm.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{gcg.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{rct.extension[0]}")
-    )
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{tob.extension[0]}")
-    )
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{btn.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{adv.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{dsp.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{ssm.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{gcg.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{rct.extension[0]}"))
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{tob.extension[0]}"))


 def test_mt3d_pht3d(function_tmpdir):
@@ -536,9 +518,7 @@ def test_mt3d_pht3d(function_tmpdir):
     mt.write_input()

     # confirm that MT3D files exist
-    assert os.path.isfile(
-        os.path.join(model_ws, f"{mt.name}.{phc.extension[0]}")
-    )
+    assert os.path.isfile(os.path.join(model_ws, f"{mt.name}.{phc.extension[0]}"))


 def test_mt3d_multispecies(function_tmpdir):
@@ -567,9 +547,7 @@ def test_mt3d_multispecies(function_tmpdir):
         verbose=True,
     )
     sconc3 = np.random.random((nrow, ncol))
-    btn = Mt3dBtn(
-        mt, ncomp=ncomp, sconc=1.0, sconc2=2.0, sconc3=sconc3, sconc5=5.0
-    )
+    btn = Mt3dBtn(mt, ncomp=ncomp, sconc=1.0, sconc2=2.0, sconc3=sconc3, sconc5=5.0)
     # check obs I/O
     mt.btn.obs = np.array([[0, 2, 300], [0, 1, 250]])
     crch32 = np.random.random((nrow, ncol))
@@ -17799,9 +17777,7 @@ def test_lkt_with_multispecies(function_tmpdir):
     mxpart = 5000
     nadvfd = 1  # (1 = Upstream weighting)

-    adv = Mt3dAdv(
-        mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd
-    )
+    adv = Mt3dAdv(mt, mixelm=mixelm, percel=percel, mxpart=mxpart, nadvfd=nadvfd)

     ## Instantiate generalized conjugate gradient solver (GCG)
     # package for MT3D-USGS
@@ -17970,9 +17946,7 @@ def test_mt3d_ssm_with_nodata_in_1st_sp(function_tmpdir):
     assert success, "MT3D did not run"

     ws = function_tmpdir / "ws2"
-    mf2 = Modflow.load(
-        "model_mf.nam", model_ws=function_tmpdir, exe_name="mf2005"
-    )
+    mf2 = Modflow.load("model_mf.nam", model_ws=function_tmpdir, exe_name="mf2005")
     mf2.change_model_ws(ws)

     mt2 = Mt3dms.load(
         "model_mt.nam",
@@ -18007,9 +17981,7 @@ def test_none_spdtype(function_tmpdir):
     wel = ModflowWel(mf, stress_period_data=spd)
     pcg = ModflowPcg(mf)
     mf.write_input()
-    mf2 = Modflow.load(
-        "modflowtest.nam", model_ws=function_tmpdir, verbose=True
-    )
+    mf2 = Modflow.load("modflowtest.nam", model_ws=function_tmpdir, verbose=True)
     success, buff = mf.run_model(report=True)
     assert success
diff --git a/autotest/test_obs.py b/autotest/test_obs.py
index 215b1893a4..b2fb79ad73 100644
--- a/autotest/test_obs.py
+++ b/autotest/test_obs.py
@@ -124,9 +124,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
         shutil.copyfile(src, dst)

     # load the modflow model
-    mf = Modflow.load(
-        "tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005"
-    )
+    mf = Modflow.load("tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005")

     # run the modflow-2005 model
     success, buff = mf.run_model(silent=False)
@@ -164,9 +162,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
         raise ValueError("could not load new HOB output file")

     # load the modflow model
-    m = Modflow.load(
-        "tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005"
-    )
+    m = Modflow.load("tc1-true.nam", verbose=True, model_ws=ws, exe_name="mf2005")

     model_ws2 = os.path.join(ws, "flwob")
     m.change_model_ws(new_pth=model_ws2, reset_external=True)
@@ -237,9 +233,7 @@ def test_obs_load_and_write(function_tmpdir, example_data_path):
     s = f"nqtfb loaded from {m.drob.fn_path} read incorrectly"
     assert drob.nqtfb == m.drob.nqtfb, s
     s = f"obsnam loaded from {m.drob.fn_path} read incorrectly"
-    assert list([n for n in drob.obsnam]) == list(
-        [n for n in m.drob.obsnam]
-    ), s
+    assert list([n for n in drob.obsnam]) == list([n for n in m.drob.obsnam]), s
     s = f"flwobs loaded from {m.drob.fn_path} read incorrectly"
     assert np.array_equal(drob.flwobs, m.drob.flwobs), s
     s = f"layer loaded from {m.drob.fn_path} read incorrectly"
@@ -419,12 +413,10 @@ def test_multilayerhob_pr_multiline():
     problem_hob = [
         "2 4 7",
         "1 1",
-        "A19E1_1 -2 140 91 1 1 -0.28321 -0.05389"
-        " 69 1 1 1 # A19E1 8/13/1975",
+        "A19E1_1 -2 140 91 1 1 -0.28321 -0.05389 69 1 1 1 # A19E1 8/13/1975",
         "3 0.954",
         "4 0.046",
-        "A19E1_2 -2 140 91 1 1 -0.28321 -0.05389"
-        " 72 1 1 1 # A19E1 10/9/1975",
+        "A19E1_2 -2 140 91 1 1 -0.28321 -0.05389 72 1 1 1 # A19E1 10/9/1975",
         "3 0.954",
         "4 0.046",
     ]
diff --git a/autotest/test_particledata.py b/autotest/test_particledata.py
index f5363f957d..22fccd7dd9 100644
--- a/autotest/test_particledata.py
+++ b/autotest/test_particledata.py
@@ -39,11 +39,7 @@ def get_nn(grid: StructuredGrid, k, i, j):

 def flatten(a):
     return [
-        [
-            *chain.from_iterable(
-                xx if isinstance(xx, tuple) else [xx] for xx in x
-            )
-        ]
+        [*chain.from_iterable(xx if isinstance(xx, tuple) else [xx] for xx in x)]
         for x in a
     ]

@@ -57,9 +53,7 @@ def test_get_extent_structured_multilayer():
         for k in range(grid.nlay):
             extent = get_extent(grid, k=k, i=i, j=j)
             assert extent.minz == grid.botm[k, i, j]
-            assert extent.maxz == (
-                grid.top[i, j] if k == 0 else grid.botm[k - 1, i, j]
-            )
+            assert extent.maxz == (grid.top[i, j] if k == 0 else grid.botm[k - 1, i, j])


 # test initializers
@@ -296,9 +290,7 @@ def test_particledata_to_prp_dis_9():

 @pytest.mark.parametrize("lx", [None, 0.5, 0.25])  # local x coord
 @pytest.mark.parametrize("ly", [None, 0.5, 0.25])  # local y coord
-@pytest.mark.parametrize(
"localz", [False, True] -) # whether to return local z coords +@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords def test_particledata_to_prp_disv_1(lx, ly, localz): """ 1 particle in bottom left cell, testing with default @@ -356,9 +348,7 @@ def test_particledata_to_prp_disv_1(lx, ly, localz): # plt.show() -@pytest.mark.parametrize( - "localz", [False, True] -) # whether to return local z coords +@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords def test_particledata_to_prp_disv_9(localz): # minimal vertex grid grid = GridCases().vertex_small() @@ -403,23 +393,17 @@ def test_particledata_to_prp_disv_9(localz): assert np.allclose(rpts_prt, rpts_exp, atol=1e-3) -@pytest.mark.parametrize( - "localz", [False, True] -) # whether to return local z coords +@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords def test_lrcparticledata_to_prp_divisions_defaults(localz, array_snapshot): sd_data = CellDataType() regions = [[0, 0, 1, 0, 1, 1]] - part_data = LRCParticleData( - subdivisiondata=[sd_data], lrcregions=[regions] - ) + part_data = LRCParticleData(subdivisiondata=[sd_data], lrcregions=[regions]) grid = GridCases().structured_small() rpts_prt = flatten(list(part_data.to_prp(grid, localz=localz))) num_cells = reduce( sum, [ - (lrc[3] - lrc[0] + 1) - * (lrc[4] - lrc[1] + 1) - * (lrc[5] - lrc[2] + 1) + (lrc[3] - lrc[0] + 1) * (lrc[4] - lrc[1] + 1) * (lrc[5] - lrc[2] + 1) for lrc in regions ], ) @@ -568,9 +552,7 @@ def test_lrcparticledata_to_prp_1_per_face(array_snapshot): assert rpts_prt == array_snapshot -@pytest.mark.parametrize( - "localz", [False, True] -) # whether to return local z coords +@pytest.mark.parametrize("localz", [False, True]) # whether to return local z coords def test_nodeparticledata_to_prp_disv_defaults( function_tmpdir, example_data_path, localz ): @@ -585,9 +567,7 @@ def test_nodeparticledata_to_prp_disv_defaults( pdat = NodeParticleData() # load gwf simulation, switch workspace, write input files, and run - sim = MFSimulation.load( - sim_ws=example_data_path / "mf6" / "test003_gwfs_disv" - ) + sim = MFSimulation.load(sim_ws=example_data_path / "mf6" / "test003_gwfs_disv") gwf = sim.get_model("gwf_1") grid = gwf.modelgrid gwf_name = "gwf" @@ -635,8 +615,7 @@ def test_nodeparticledata_to_prp_disv_defaults( mp7_pls = pd.concat([pd.DataFrame(ra) for ra in pldata]) mp7_pls = mp7_pls.sort_values(by=["time", "particleid"]).head(27) mp7_rpts = [ - [0, r.k, r.x, r.y, r.zloc if localz else r.z] - for r in mp7_pls.itertuples() + [0, r.k, r.x, r.y, r.zloc if localz else r.z] for r in mp7_pls.itertuples() ] # omit rpt index mp7_rpts.sort() @@ -750,9 +729,7 @@ def test_nodeparticledata_prp_disv_big(function_tmpdir): rowdivisions6=4, columndivisions6=4, ) - pgdata = flopy.modpath.NodeParticleData( - subdivisiondata=facedata, nodes=nodew - ) + pgdata = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) # convert to PRP package data rpts_prt = flatten(list(pgdata.to_prp(grid))) @@ -804,9 +781,7 @@ def test_lrcparticledata_write(function_tmpdir): @pytest.fixture def mf6_sim(function_tmpdir): name = "tiny-gwf" - sim = flopy.mf6.MFSimulation( - sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" - ) + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=function_tmpdir, exe_name="mf6") tdis = flopy.mf6.ModflowTdis(sim) ims = flopy.mf6.ModflowIms(sim) gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) diff --git a/autotest/test_plot_cross_section.py 
b/autotest/test_plot_cross_section.py index 5c67b37bd6..2da5eadff7 100644 --- a/autotest/test_plot_cross_section.py +++ b/autotest/test_plot_cross_section.py @@ -9,9 +9,7 @@ @pytest.mark.mf6 -@pytest.mark.xfail( - reason="sometimes get LineCollections instead of PatchCollections" -) +@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections") def test_cross_section_bc_gwfs_disv(example_data_path): mpath = example_data_path / "mf6" / "test003_gwfs_disv" sim = MFSimulation.load(sim_ws=mpath) @@ -29,9 +27,7 @@ def test_cross_section_bc_gwfs_disv(example_data_path): @pytest.mark.mf6 -@pytest.mark.xfail( - reason="sometimes get LineCollections instead of PatchCollections" -) +@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections") def test_cross_section_bc_lake2tr(example_data_path): mpath = example_data_path / "mf6" / "test045_lake2tr" sim = MFSimulation.load(sim_ws=mpath) @@ -50,9 +46,7 @@ def test_cross_section_bc_lake2tr(example_data_path): @pytest.mark.mf6 -@pytest.mark.xfail( - reason="sometimes get LineCollections instead of PatchCollections" -) +@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections") def test_cross_section_bc_2models_mvr(example_data_path): mpath = example_data_path / "mf6" / "test006_2models_mvr" sim = MFSimulation.load(sim_ws=mpath) @@ -70,9 +64,7 @@ def test_cross_section_bc_2models_mvr(example_data_path): @pytest.mark.mf6 -@pytest.mark.xfail( - reason="sometimes get LineCollections instead of PatchCollections" -) +@pytest.mark.xfail(reason="sometimes get LineCollections instead of PatchCollections") def test_cross_section_bc_UZF_3lay(example_data_path): mpath = example_data_path / "mf6" / "test001e_UZF_3lay" sim = MFSimulation.load(sim_ws=mpath) @@ -157,9 +149,7 @@ def test_cross_section_valid_line_representations(line): # make sure parsed points are identical for all line representations assert np.allclose(lxc.pts, fxc.pts) and np.allclose(lxc.pts, sxc.pts) - assert ( - set(lxc.xypts.keys()) == set(fxc.xypts.keys()) == set(sxc.xypts.keys()) - ) + assert set(lxc.xypts.keys()) == set(fxc.xypts.keys()) == set(sxc.xypts.keys()) for k in lxc.xypts.keys(): assert np.allclose(lxc.xypts[k], fxc.xypts[k]) and np.allclose( lxc.xypts[k], sxc.xypts[k] @@ -206,9 +196,7 @@ def test_plot_limits(): user_extent = 0, 500, 0, 25 ax.axis(user_extent) - pxc = flopy.plot.PlotCrossSection( - modelgrid=grid, ax=ax, line={"column": 4} - ) + pxc = flopy.plot.PlotCrossSection(modelgrid=grid, ax=ax, line={"column": 4}) pxc.plot_grid() lims = ax.axes.viewLim @@ -218,9 +206,7 @@ def test_plot_limits(): plt.close(fig) fig, ax = plt.subplots(figsize=(8, 8)) - pxc = flopy.plot.PlotCrossSection( - modelgrid=grid, ax=ax, line={"column": 4} - ) + pxc = flopy.plot.PlotCrossSection(modelgrid=grid, ax=ax, line={"column": 4}) pxc.plot_grid() lims = ax.axes.viewLim @@ -256,15 +242,11 @@ def test_plot_centers(): pc = pxc.plot_centers() if not isinstance(pc, PathCollection): - raise AssertionError( - "plot_centers() not returning PathCollection object" - ) + raise AssertionError("plot_centers() not returning PathCollection object") verts = pc._offsets if not verts.shape[0] == active_xc_cells: - raise AssertionError( - "plot_centers() not properly masking inactive cells" - ) + raise AssertionError("plot_centers() not properly masking inactive cells") center_dict = pxc.projctr edge_dict = pxc.projpts @@ -274,6 +256,4 @@ def test_plot_centers(): xmin = np.min(verts[0]) xmax = np.max(verts[0]) if xmax < center < xmin: - 
raise AssertionError( - "Cell center not properly drawn on cross-section" - ) + raise AssertionError("Cell center not properly drawn on cross-section") diff --git a/autotest/test_plot_map_view.py b/autotest/test_plot_map_view.py index b301d5b0a1..386242d1d9 100644 --- a/autotest/test_plot_map_view.py +++ b/autotest/test_plot_map_view.py @@ -215,9 +215,7 @@ def test_map_view_contour_array_structured(function_tmpdir, ndim, rng): elif ndim == 2: # 1 layer as 2D pmv = PlotMapView(modelgrid=grid, layer=l) - contours = pmv.contour_array( - a=arr.reshape(nlay, nrow, ncol)[l, :, :] - ) + contours = pmv.contour_array(a=arr.reshape(nlay, nrow, ncol)[l, :, :]) plt.savefig(function_tmpdir / f"map_view_contour_{ndim}d_l{l}.png") plt.clf() elif ndim == 3: @@ -303,15 +301,11 @@ def test_plot_centers(): pmv = flopy.plot.PlotMapView(modelgrid=grid) pc = pmv.plot_centers() if not isinstance(pc, PathCollection): - raise AssertionError( - "plot_centers() not returning PathCollection object" - ) + raise AssertionError("plot_centers() not returning PathCollection object") verts = pc._offsets if not verts.shape[0] == active_cells: - raise AssertionError( - "plot_centers() not properly masking inactive cells" - ) + raise AssertionError("plot_centers() not properly masking inactive cells") for vert in verts: vert = tuple(vert) diff --git a/autotest/test_plot_particle_tracks.py b/autotest/test_plot_particle_tracks.py index 8ba4342bf2..e7f9986924 100644 --- a/autotest/test_plot_particle_tracks.py +++ b/autotest/test_plot_particle_tracks.py @@ -29,9 +29,7 @@ def modpath_model(function_tmpdir, example_data_path): model_ws=function_tmpdir, ) - mpb = Modpath6Bas( - mp, hdry=ml.lpf.hdry, laytyp=ml.lpf.laytyp, ibound=1, prsity=0.1 - ) + mpb = Modpath6Bas(mp, hdry=ml.lpf.hdry, laytyp=ml.lpf.laytyp, ibound=1, prsity=0.1) sim = mp.create_mpsim( trackdir="forward", @@ -49,9 +47,7 @@ def test_plot_map_view_mp6_plot_pathline(modpath_model): mp.run_model(silent=False) pthobj = PathlineFile(join(mp.model_ws, "ex6.mppth")) - well_pathlines = pthobj.get_destination_pathline_data( - dest_cells=[(4, 12, 12)] - ) + well_pathlines = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)]) def test_plot(pl): mx = PlotMapView(model=ml) @@ -82,9 +78,7 @@ def test_plot_cross_section_mp6_plot_pathline(modpath_model): mp.run_model(silent=False) pthobj = PathlineFile(join(mp.model_ws, "ex6.mppth")) - well_pathlines = pthobj.get_destination_pathline_data( - dest_cells=[(4, 12, 12)] - ) + well_pathlines = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)]) def test_plot(pl): mx = PlotCrossSection(model=ml, line={"row": 4}) @@ -153,9 +147,7 @@ def test_plot_map_view_mp6_endpoint(modpath_model): # colorbar: color by time to termination mv = PlotMapView(model=ml) mv.plot_bc("WEL", kper=2, color="blue") - ep = mv.plot_endpoint( - endpts, direction="ending", shrink=0.5, colorbar=True - ) + ep = mv.plot_endpoint(endpts, direction="ending", shrink=0.5, colorbar=True) # plt.show() assert isinstance(ep, PathCollection) diff --git a/autotest/test_plot_quasi3d.py b/autotest/test_plot_quasi3d.py index f26eb03dac..3ffdda12c3 100644 --- a/autotest/test_plot_quasi3d.py +++ b/autotest/test_plot_quasi3d.py @@ -101,9 +101,7 @@ def quasi3d_model(function_tmpdir): @requires_exe("mf2005") def test_map_plot_with_quasi3d_layers(quasi3d_model): # read output - hf = HeadFile( - os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds") - ) + hf = HeadFile(os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds")) head = 
hf.get_data(totim=1.0) cbb = CellBudgetFile( os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.cbc") @@ -127,9 +125,7 @@ def test_map_plot_with_quasi3d_layers(quasi3d_model): @requires_exe("mf2005") def test_cross_section_with_quasi3d_layers(quasi3d_model): # read output - hf = HeadFile( - os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds") - ) + hf = HeadFile(os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.hds")) head = hf.get_data(totim=1.0) cbb = CellBudgetFile( os.path.join(quasi3d_model.model_ws, f"{quasi3d_model.name}.cbc") diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py index 8aa228f8aa..ae88d1133d 100644 --- a/autotest/test_plotutil.py +++ b/autotest/test_plotutil.py @@ -329,15 +329,11 @@ @pytest.mark.parametrize("dataframe", [True, False]) def test_to_mp7_pathlines(dataframe): prt_pls = ( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) + PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False) ) mp7_pls = to_mp7_pathlines(prt_pls) assert ( - type(prt_pls) - == type(mp7_pls) - == (pd.DataFrame if dataframe else np.recarray) + type(prt_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert len(mp7_pls) == 10 assert set( @@ -361,15 +357,11 @@ def test_to_mp7_pathlines_empty(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) def test_to_mp7_pathlines_noop(dataframe): prt_pls = ( - MP7_TEST_PATHLINES - if dataframe - else MP7_TEST_PATHLINES.to_records(index=False) + MP7_TEST_PATHLINES if dataframe else MP7_TEST_PATHLINES.to_records(index=False) ) mp7_pls = to_mp7_pathlines(prt_pls) assert ( - type(prt_pls) - == type(mp7_pls) - == (pd.DataFrame if dataframe else np.recarray) + type(prt_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert len(mp7_pls) == 2 assert set( @@ -383,9 +375,7 @@ def test_to_mp7_pathlines_noop(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) def test_to_mp7_endpoints(dataframe): mp7_eps = to_mp7_endpoints( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) + PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False) ) assert len(mp7_eps) == 1 assert np.isclose(mp7_eps.time[0], PRT_TEST_PATHLINES.t.max()) @@ -411,9 +401,7 @@ def test_to_mp7_endpoints_empty(dataframe): def test_to_mp7_endpoints_noop(dataframe): """Test a recarray or dataframe which already contains MP7 endpoint data""" mp7_eps = to_mp7_endpoints( - MP7_TEST_ENDPOINTS - if dataframe - else MP7_TEST_ENDPOINTS.to_records(index=False) + MP7_TEST_ENDPOINTS if dataframe else MP7_TEST_ENDPOINTS.to_records(index=False) ) assert np.array_equal( mp7_eps if dataframe else pd.DataFrame(mp7_eps), MP7_TEST_ENDPOINTS @@ -423,9 +411,7 @@ def test_to_mp7_endpoints_noop(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) def test_to_prt_pathlines_roundtrip(dataframe): mp7_pls = to_mp7_pathlines( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) + PRT_TEST_PATHLINES if dataframe else PRT_TEST_PATHLINES.to_records(index=False) ) prt_pls = to_prt_pathlines(mp7_pls) if not dataframe: diff --git a/autotest/test_postprocessing.py b/autotest/test_postprocessing.py index d45ab84f3c..eb51ae5071 100644 --- a/autotest/test_postprocessing.py +++ b/autotest/test_postprocessing.py @@ -101,9 +101,7 @@ def test_get_structured_faceflows(function_tmpdir, nlay, nrow, ncol): iface = 6 # top for i in range(0, max_dim): # 
((layer,row,col),head,iface) - cell_id = ( - (0, 0, i) if ncol > 1 else (0, i, 0) if nrow > 1 else (i, 0, 0) - ) + cell_id = (0, 0, i) if ncol > 1 else (0, i, 0) if nrow > 1 else (i, 0, 0) chd_rec.append((cell_id, h[i], iface)) chd = flopy.mf6.ModflowGwfchd( gwf, @@ -362,12 +360,8 @@ def test_get_structured_faceflows_idomain( Qv_aqui = cbf0[~idx].sum() # Flow across aquitard print(f"Total flow across bottom of upper aquifer {Qv_sum:0.2f} m^3/d") - print( - f"Flow across bottom of upper aquifer to aquitard {Qv_aqui:0.2f} m^3/d" - ) - print( - f"Flow across bottom of upper aquifer to lower aquifer {Qv_wind:0.2f} m^3/d" - ) + print(f"Flow across bottom of upper aquifer to aquitard {Qv_aqui:0.2f} m^3/d") + print(f"Flow across bottom of upper aquifer to lower aquifer {Qv_wind:0.2f} m^3/d") print(np.isclose(-Qwell, Qv_sum, atol=1e-3)) assert np.isclose(-Qwell, Qv_sum, atol=1e-3) @@ -430,14 +424,10 @@ def test_structured_faceflows_3d_shape(function_tmpdir): tdis = ModflowTdis(sim) ims = ModflowIms(sim) gwf = ModflowGwf(sim, modelname=name, save_flows=True) - dis = ModflowGwfdis( - gwf, nlay=3, nrow=10, ncol=10, top=0, botm=[-1, -2, -3] - ) + dis = ModflowGwfdis(gwf, nlay=3, nrow=10, ncol=10, top=0, botm=[-1, -2, -3]) ic = ModflowGwfic(gwf) npf = ModflowGwfnpf(gwf, save_specific_discharge=True) - chd = ModflowGwfchd( - gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]] - ) + chd = ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]]) budget_file = name + ".bud" head_file = name + ".hds" oc = ModflowGwfoc( @@ -456,15 +446,9 @@ def test_structured_faceflows_3d_shape(function_tmpdir): flowja, grb_file=function_tmpdir / "mymodel.dis.grb", ) - assert ( - frf.shape == head.shape - ), f"frf.shape {frf.shape} != head.shape {head.shape}" - assert ( - fff.shape == head.shape - ), f"frf.shape {frf.shape} != head.shape {head.shape}" - assert ( - flf.shape == head.shape - ), f"frf.shape {frf.shape} != head.shape {head.shape}" + assert frf.shape == head.shape, f"frf.shape {frf.shape} != head.shape {head.shape}" + assert fff.shape == head.shape, f"frf.shape {frf.shape} != head.shape {head.shape}" + assert flf.shape == head.shape, f"frf.shape {frf.shape} != head.shape {head.shape}" def test_get_transmissivities(function_tmpdir): diff --git a/autotest/test_rasters.py b/autotest/test_rasters.py index 3a26e46389..3a9e5a87b5 100644 --- a/autotest/test_rasters.py +++ b/autotest/test_rasters.py @@ -56,9 +56,7 @@ def test_rasters(example_data_path): if (np.max(data) - 2608.557) > 1e-4: raise AssertionError - data = rio.resample_to_grid( - ml.modelgrid, band=rio.bands[0], method="nearest" - ) + data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method="nearest") if data.size != 5913: raise AssertionError if abs(np.min(data) - 1942.1735) > 1e-4: @@ -107,15 +105,11 @@ def test_raster_sampling_methods(example_data_path): } for method, value in methods.items(): - data = rio.resample_to_grid( - ml.modelgrid, band=rio.bands[0], method=method - ) + data = rio.resample_to_grid(ml.modelgrid, band=rio.bands[0], method=method) print(data[30, 37]) if np.abs(data[30, 37] - value) > 1e-05: - raise AssertionError( - f"{method} resampling returning incorrect values" - ) + raise AssertionError(f"{method} resampling returning incorrect values") @requires_pkg("rasterio") @@ -136,9 +130,7 @@ def test_raster_reprojection(example_data_path): raise AssertionError(f"Raster not converted to EPSG {wgs_epsg}") transform = wgs_raster._meta["transform"] - if not np.isclose(transform.c, wgs_xmin) and not 
np.isclose( - transform.f, wgs_ymax - ): + if not np.isclose(transform.c, wgs_xmin) and not np.isclose(transform.f, wgs_ymax): raise AssertionError(f"Raster not reprojected to EPSG {wgs_epsg}") raster.to_crs(epsg=wgs_epsg, inplace=True) @@ -212,9 +204,7 @@ def test_create_raster_from_array_transform(example_data_path): transform.a / 2, 0, transform.c, 0, transform.e / 2, transform.f ) - robj = Raster.raster_from_array( - array, crs=raster.crs, transform=new_transform - ) + robj = Raster.raster_from_array(array, crs=raster.crs, transform=new_transform) rxmin, rxmax, rymin, rymax = robj.bounds xmin, xmax, ymin, ymax = raster.bounds diff --git a/autotest/test_seawat.py b/autotest/test_seawat.py index aba1c125c6..4cb4ec4357 100644 --- a/autotest/test_seawat.py +++ b/autotest/test_seawat.py @@ -195,9 +195,7 @@ def test_seawat2_henry(function_tmpdir): def swt4_namfiles(): - return [ - str(p) for p in (get_example_data_path() / "swtv4_test").rglob("*.nam") - ] + return [str(p) for p in (get_example_data_path() / "swtv4_test").rglob("*.nam")] @requires_exe("swtv4") diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index c50dcffe50..34f3daf986 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -116,9 +116,7 @@ def sfr_data(): r["iseg"] = sorted(list(range(1, 10)) * 3) r["ireach"] = [1, 2, 3] * 9 - d = create_empty_recarray( - 9, dtype=np.dtype([("nseg", int), ("outseg", int)]) - ) + d = create_empty_recarray(9, dtype=np.dtype([("nseg", int), ("outseg", int)])) d["nseg"] = range(1, 10) d["outseg"] = [4, 0, 6, 8, 3, 8, 1, 2, 8] return r, d @@ -193,9 +191,7 @@ def sfr_process(mfnam, sfrfile, model_ws, outfolder): "UZFtest2.nam", "UZFtest2.sfr", mf2005_model_path, function_tmpdir ) - assert isinstance( - sfr.plot()[0], matplotlib.axes.Axes - ) # test the plot() method + assert isinstance(sfr.plot()[0], matplotlib.axes.Axes) # test the plot() method matplotlib.pyplot.close() def interpolate_to_reaches(sfr): @@ -216,12 +212,7 @@ def interpolate_to_reaches(sfr): ] xp = [dist[0], dist[-1]] assert ( - np.sum( - np.abs( - reaches[reachvar] - - np.interp(dist, xp, fp).tolist() - ) - ) + np.sum(np.abs(reaches[reachvar] - np.interp(dist, xp, fp).tolist())) < 0.01 ) return reach_data @@ -239,15 +230,11 @@ def interpolate_to_reaches(sfr): out_inds = np.asarray(sfr.reach_data.reachID == outreach).nonzero() assert ( sfr.reach_data.slope[reach_inds] - == ( - sfr.reach_data.strtop[reach_inds] - sfr.reach_data.strtop[out_inds] - ) + == (sfr.reach_data.strtop[reach_inds] - sfr.reach_data.strtop[out_inds]) / sfr.reach_data.rchlen[reach_inds] ) chk = sfr.check() - assert ( - sfr.reach_data.slope.min() < 0.0001 and "minimum slope" in chk.warnings - ) + assert sfr.reach_data.slope.min() < 0.0001 and "minimum slope" in chk.warnings # negative segments for lakes shouldn't be included in segment numbering order check assert "segment numbering order" not in chk.warnings sfr.reach_data.slope[0] = 1.1 @@ -456,9 +443,7 @@ def test_example(mf2005_model_path): nparseg = 0 const = 1.486 # constant for manning's equation, units of cfs dleak = 0.0001 # closure tolerance for stream stage computation - ipakcb = ( - 53 # flag for writing SFR output to cell-by-cell budget (on unit 53) - ) + ipakcb = 53 # flag for writing SFR output to cell-by-cell budget (on unit 53) istcb2 = 81 # flag for writing SFR output to text file dataset_5 = {0: [nss, 0, 0]} # dataset 5 (see online guide) @@ -735,9 +720,7 @@ def test_SfrFile(function_tmpdir, sfr_examples_path, mf2005_model_path): assert df.gradient.values[-1] == 5.502e-02 
assert df.shape == (1080, 20) - ml = Modflow.load( - "test1tr.nam", model_ws=mf2005_model_path, exe_name="mf2005" - ) + ml = Modflow.load("test1tr.nam", model_ws=mf2005_model_path, exe_name="mf2005") ml.change_model_ws(function_tmpdir) ml.write_input() ml.run_model() @@ -848,9 +831,7 @@ def test_sfrcheck(function_tmpdir, mf2005_model_path): # throw warning if isfropt=1 and strtop at default assert "maximum streambed top" in chk.warnings assert "minimum streambed top" in chk.warnings - m.sfr.reach_data["strtop"] = m.sfr._interpolate_to_reaches( - "elevup", "elevdn" - ) + m.sfr.reach_data["strtop"] = m.sfr._interpolate_to_reaches("elevup", "elevdn") m.sfr.get_slopes() m.sfr.reach_data["strhc1"] = 1.0 m.sfr.reach_data["strthick"] = 1.0 @@ -899,8 +880,7 @@ def test_isfropt_icalc(function_tmpdir, example_data_path, isfropt, icalc): success = ml.run_model()[0] if not success: raise AssertionError( - f"sfrtest{isfropt}{icalc}.nam " - "is broken, please fix SFR 6a, 6bc logic!" + f"sfrtest{isfropt}{icalc}.nam is broken, please fix SFR 6a, 6bc logic!" ) @@ -958,8 +938,6 @@ def test_mf2005(function_tmpdir, namfile): ) for name in str2.dtype2.names: assert ( - np.array_equal( - str2.segment_data[0][name], m.str.segment_data[0][name] - ) + np.array_equal(str2.segment_data[0][name], m.str.segment_data[0][name]) is True ) diff --git a/autotest/test_shapefile_utils.py b/autotest/test_shapefile_utils.py index 5d1292c7df..8c13c4d27f 100644 --- a/autotest/test_shapefile_utils.py +++ b/autotest/test_shapefile_utils.py @@ -23,9 +23,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): name = "freyberg" namfile = f"{name}.nam" ws = example_data_path / name - m = flopy.modflow.Modflow.load( - namfile, model_ws=ws, check=False, verbose=False - ) + m = flopy.modflow.Modflow.load(namfile, model_ws=ws, check=False, verbose=False) shpfile_path = function_tmpdir / f"{name}.shp" pakg_names = ["DIS", "BAS6", "LPF", "WEL", "RIV", "RCH", "OC", "PCG"] model_attributes_to_shapefile(shpfile_path, m, pakg_names) @@ -33,9 +31,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): # freyberg mf6 model name = "mf6-freyberg" - sim = flopy.mf6.MFSimulation.load( - sim_name=name, sim_ws=example_data_path / name - ) + sim = flopy.mf6.MFSimulation.load(sim_name=name, sim_ws=example_data_path / name) m = sim.get_model() shpfile_path = function_tmpdir / f"{name}.shp" pakg_names = ["dis", "bas6", "npf", "wel", "riv", "rch", "oc", "pcg"] diff --git a/autotest/test_specific_discharge.py b/autotest/test_specific_discharge.py index cd218ab301..e0f0596e44 100644 --- a/autotest/test_specific_discharge.py +++ b/autotest/test_specific_discharge.py @@ -197,9 +197,7 @@ def mf6_model(function_tmpdir): # create tdis package tdis_rc = [(1.0, 1, 1.0)] - tdis = ModflowTdis( - sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc - ) + tdis = ModflowTdis(sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc) # create gwf model gwf = ModflowGwf( @@ -267,8 +265,7 @@ def mf6_model(function_tmpdir): # create ghb package ghbspd = [ - [(ghb_i[0], ghb_i[1], ghb_i[2]), ghb_i[3], ghb_i[4]] - for ghb_i in ghb_list + [(ghb_i[0], ghb_i[1], ghb_i[2]), ghb_i[3], ghb_i[4]] for ghb_i in ghb_list ] ghb = ModflowGwfghb(gwf, print_input=True, stress_period_data=ghbspd) @@ -281,8 +278,7 @@ def mf6_model(function_tmpdir): # create drn package drnspd = [ - [(drn_i[0], drn_i[1], drn_i[2]), drn_i[3], drn_i[4]] - for drn_i in drn_list + [(drn_i[0], drn_i[1], drn_i[2]), drn_i[3], drn_i[4]] for drn_i in drn_list ] drn = 
ModflowGwfdrn(gwf, print_input=True, stress_period_data=drnspd) @@ -359,9 +355,7 @@ def test_extended_budget_default(mf2005_model): mf.run_model() # load and postprocess - Qx_ext, Qy_ext, Qz_ext = get_extended_budget( - function_tmpdir / "mf2005.cbc" - ) + Qx_ext, Qy_ext, Qz_ext = get_extended_budget(function_tmpdir / "mf2005.cbc") # basic check basic_check(Qx_ext, Qy_ext, Qz_ext) @@ -389,9 +383,7 @@ def extended_budget_comprehensive(function_tmpdir): basic_check(Qx_ext, Qy_ext, Qz_ext) # local balance check - local_balance_check( - Qx_ext, Qy_ext, Qz_ext, function_tmpdir / "mf2005.hds", mf - ) + local_balance_check(Qx_ext, Qy_ext, Qz_ext, function_tmpdir / "mf2005.hds", mf) # overall check overall = np.sum(Qx_ext) + np.sum(Qy_ext) + np.sum(Qz_ext) @@ -501,9 +493,7 @@ def test_specific_discharge_mf6(mf6_model): sim.run_simulation() # load and postprocess - sim = MFSimulation.load( - sim_name="mf6", sim_ws=function_tmpdir, verbosity_level=0 - ) + sim = MFSimulation.load(sim_name="mf6", sim_ws=function_tmpdir, verbosity_level=0) gwf = sim.get_model("mf6") hds = bf.HeadFile(function_tmpdir / "mf6.hds") head = hds.get_data() @@ -528,9 +518,7 @@ def test_specific_discharge_mf6(mf6_model): ax = modelmap.ax assert len(ax.collections) != 0, "Discharge vector was not drawn" for col in ax.collections: - assert isinstance( - col, Quiver - ), f"Unexpected collection type: {type(col)}" + assert isinstance(col, Quiver), f"Unexpected collection type: {type(col)}" assert np.sum(quiver.Umask) == 1 pos = np.sum(quiver.X) + np.sum(quiver.Y) assert np.allclose(pos, 1600.0) diff --git a/autotest/test_subwt.py b/autotest/test_subwt.py index 484a0f3bd1..45c0e1f5aa 100644 --- a/autotest/test_subwt.py +++ b/autotest/test_subwt.py @@ -68,9 +68,7 @@ def test_subwt(function_tmpdir, ibound_path): sp2_wells.append([1, 8, 9, -72000.0]) sp2_wells.append([3, 11, 6, -72000.0]) - ModflowWel( - ml, stress_period_data={0: sp1_wells, 1: sp2_wells, 2: sp1_wells} - ) + ModflowWel(ml, stress_period_data={0: sp1_wells, 1: sp2_wells, 2: sp1_wells}) ModflowSwt( ml, diff --git a/autotest/test_swr_binaryread.py b/autotest/test_swr_binaryread.py index 02bf20a96b..2efd6824fe 100644 --- a/autotest/test_swr_binaryread.py +++ b/autotest/test_swr_binaryread.py @@ -42,12 +42,8 @@ def test_swr_binary_stage(swr_test_path, ipos): for idx in range(ntimes): r = sobj.get_data(idx=idx) - assert ( - r is not None - ), "SwrStage could not read data with get_data(idx=)" - assert r.shape == ( - 18, - ), "SwrStage stage data shape does not equal (18,)" + assert r is not None, "SwrStage could not read data with get_data(idx=)" + assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)" assert ( len(r.dtype.names) == 2 ), "SwrStage stage data dtype does not have 2 entries" @@ -63,9 +59,7 @@ def test_swr_binary_stage(swr_test_path, ipos): assert ( r is not None ), "SwrStage could not read data with get_data(kswrkstpkper=)" - assert r.shape == ( - 18, - ), "SwrStage stage data shape does not equal (18,)" + assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)" assert ( len(r.dtype.names) == 2 ), "SwrStage stage data dtype does not have 2 entries" @@ -75,20 +69,14 @@ def test_swr_binary_stage(swr_test_path, ipos): for time in times: r = sobj.get_data(totim=time) - assert ( - r is not None - ), "SwrStage could not read data with get_data(tottim=)" - assert r.shape == ( - 18, - ), "SwrStage stage data shape does not equal (18,)" + assert r is not None, "SwrStage could not read data with get_data(tottim=)" + assert r.shape == 
(18,), "SwrStage stage data shape does not equal (18,)" assert ( len(r.dtype.names) == 2 ), "SwrStage stage data dtype does not have 2 entries" ts = sobj.get_ts(irec=17) - assert ts.shape == ( - 336, - ), "SwrStage stage timeseries shape does not equal (336,)" + assert ts.shape == (336,), "SwrStage stage timeseries shape does not equal (336,)" assert ( len(ts.dtype.names) == 2 ), "SwrStage stage time series stage data dtype does not have 2 entries" @@ -111,15 +99,9 @@ def test_swr_binary_budget(swr_test_path, ipos): for idx in range(ntimes): r = sobj.get_data(idx=idx) - assert ( - r is not None - ), "SwrBudget could not read data with get_data(idx=)" - assert r.shape == ( - 18, - ), "SwrBudget budget data shape does not equal (18,)" - assert ( - len(r.dtype.names) == 15 - ), "SwrBudget data dtype does not have 15 entries" + assert r is not None, "SwrBudget could not read data with get_data(idx=)" + assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)" + assert len(r.dtype.names) == 15, "SwrBudget data dtype does not have 15 entries" # plt.bar(range(18), r['inf-out']) # plt.show() @@ -135,9 +117,7 @@ def test_swr_binary_budget(swr_test_path, ipos): assert ( r is not None ), "SwrBudget could not read data with get_data(kswrkstpkper=)" - assert r.shape == ( - 18, - ), "SwrBudget budget data shape does not equal (18,)" + assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)" assert ( len(r.dtype.names) == 15 ), "SwrBudget budget data dtype does not have 15 entries" @@ -147,20 +127,14 @@ def test_swr_binary_budget(swr_test_path, ipos): for time in times: r = sobj.get_data(totim=time) - assert ( - r is not None - ), "SwrBudget could not read data with get_data(tottim=)" - assert r.shape == ( - 18, - ), "SwrBudget budget data shape does not equal (18,)" + assert r is not None, "SwrBudget could not read data with get_data(tottim=)" + assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)" assert ( len(r.dtype.names) == 15 ), "SwrBudget budget data dtype does not have 15 entries" ts = sobj.get_ts(irec=17) - assert ts.shape == ( - 336, - ), "SwrBudget budget timeseries shape does not equal (336,)" + assert ts.shape == (336,), "SwrBudget budget timeseries shape does not equal (336,)" assert ( len(ts.dtype.names) == 15 ), "SwrBudget time series budget data dtype does not have 15 entries" @@ -191,9 +165,7 @@ def test_swr_binary_qm(swr_test_path, ipos): r = sobj.get_data(idx=idx) assert r is not None, "SwrFlow could not read data with get_data(idx=)" assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)" - assert ( - len(r.dtype.names) == 3 - ), "SwrFlow qm data dtype does not have 3 entries" + assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries" # plt.bar(range(40), r['flow']) # plt.show() @@ -206,39 +178,27 @@ def test_swr_binary_qm(swr_test_path, ipos): for kkk in kswrkstpkper: r = sobj.get_data(kswrkstpkper=kkk) - assert ( - r is not None - ), "SwrFlow could not read data with get_data(kswrkstpkper=)" + assert r is not None, "SwrFlow could not read data with get_data(kswrkstpkper=)" assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)" - assert ( - len(r.dtype.names) == 3 - ), "SwrFlow qm data dtype does not have 3 entries" + assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries" times = sobj.get_times() assert len(times) == 336, "SwrFlow times length does not equal 336" for time in times: r = sobj.get_data(totim=time) - assert ( - r is not None - ), 
"SwrFlow could not read data with get_data(tottim=)" + assert r is not None, "SwrFlow could not read data with get_data(tottim=)" assert r.shape == (40,), "SwrFlow qm data shape does not equal (40,)" - assert ( - len(r.dtype.names) == 3 - ), "SwrFlow qm data dtype does not have 3 entries" + assert len(r.dtype.names) == 3, "SwrFlow qm data dtype does not have 3 entries" ts = sobj.get_ts(irec=17, iconn=16) - assert ts.shape == ( - 336, - ), "SwrFlow qm timeseries shape does not equal (336,)" + assert ts.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)" assert ( len(ts.dtype.names) == 3 ), "SwrFlow time series qm data dtype does not have 3 entries" ts2 = sobj.get_ts(irec=16, iconn=17) - assert ts2.shape == ( - 336, - ), "SwrFlow qm timeseries shape does not equal (336,)" + assert ts2.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)" assert ( len(ts2.dtype.names) == 3 ), "SwrFlow time series qm data dtype does not have 3 entries" @@ -262,12 +222,8 @@ def test_swr_binary_qaq(swr_test_path, ipos): for idx in range(ntimes): r = sobj.get_data(idx=idx) - assert ( - r is not None - ), "SwrExchange could not read data with get_data(idx=)" - assert r.shape == ( - 21, - ), "SwrExchange qaq data shape does not equal (21,)" + assert r is not None, "SwrExchange could not read data with get_data(idx=)" + assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)" assert ( len(r.dtype.names) == 11 ), "SwrExchange qaq data dtype does not have 11 entries" @@ -286,9 +242,7 @@ def test_swr_binary_qaq(swr_test_path, ipos): assert ( r is not None ), "SwrExchange could not read data with get_data(kswrkstpkper=)" - assert r.shape == ( - 21, - ), "SwrExchange qaq data shape does not equal (21,)" + assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)" assert ( len(r.dtype.names) == 11 ), "SwrExchange qaq data dtype does not have 11 entries" @@ -298,20 +252,14 @@ def test_swr_binary_qaq(swr_test_path, ipos): for time in times: r = sobj.get_data(totim=time) - assert ( - r is not None - ), "SwrExchange could not read data with get_data(tottim=)" - assert r.shape == ( - 21, - ), "SwrExchange qaq data shape does not equal (21,)" + assert r is not None, "SwrExchange could not read data with get_data(tottim=)" + assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)" assert ( len(r.dtype.names) == 11 ), "SwrExchange qaq data dtype does not have 11 entries" ts = sobj.get_ts(irec=17, klay=0) - assert ts.shape == ( - 350, - ), "SwrExchange timeseries shape does not equal (350,)" + assert ts.shape == (350,), "SwrExchange timeseries shape does not equal (350,)" assert ( len(ts.dtype.names) == 11 ), "SwrExchange time series qaq data dtype does not have 11 entries" @@ -334,12 +282,8 @@ def test_swr_binary_structure(swr_test_path, ipos): for idx in range(ntimes): r = sobj.get_data(idx=idx) - assert ( - r is not None - ), "SwrStructure could not read data with get_data(idx=)" - assert r.shape == ( - 2, - ), "SwrStructure structure data shape does not equal (2,)" + assert r is not None, "SwrStructure could not read data with get_data(idx=)" + assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)" assert ( len(r.dtype.names) == 8 ), "SwrStructure structure data dtype does not have 8 entries" @@ -355,9 +299,7 @@ def test_swr_binary_structure(swr_test_path, ipos): assert ( r is not None ), "SwrStructure could not read data with get_data(kswrkstpkper=)" - assert r.shape == ( - 2, - ), "SwrStructure structure data shape 
does not equal (2,)" + assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)" assert ( len(r.dtype.names) == 8 ), "SwrStructure structure data dtype does not have 8 entries" @@ -367,20 +309,14 @@ def test_swr_binary_structure(swr_test_path, ipos): for time in times: r = sobj.get_data(totim=time) - assert ( - r is not None - ), "SwrStructure could not read data with get_data(tottim=)" - assert r.shape == ( - 2, - ), "SwrStructure structure data shape does not equal (2,)" + assert r is not None, "SwrStructure could not read data with get_data(tottim=)" + assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)" assert ( len(r.dtype.names) == 8 ), "SwrStructure structure data dtype does not have 8 entries" ts = sobj.get_ts(irec=17, istr=0) - assert ts.shape == ( - 336, - ), "SwrStructure timeseries shape does not equal (336,)" + assert ts.shape == (336,), "SwrStructure timeseries shape does not equal (336,)" assert ( len(ts.dtype.names) == 8 ), "SwrStructure time series structure data dtype does not have 8 entries" @@ -410,41 +346,25 @@ def test_swr_binary_obs(swr_test_path, ipos): assert len(times) == 336, "SwrFile times length does not equal 336" ts = sobj.get_data() - assert ts.shape == ( - 336, - ), "SwrObs length of data array does not equal (336,)" - assert ( - len(ts.dtype.names) == 10 - ), "SwrObs data does not have totim + 9 observations" + assert ts.shape == (336,), "SwrObs length of data array does not equal (336,)" + assert len(ts.dtype.names) == 10, "SwrObs data does not have totim + 9 observations" ts = sobj.get_data(obsname="OBS5") - assert ts.shape == ( - 336, - ), "SwrObs length of data array does not equal (336,)" - assert ( - len(ts.dtype.names) == 2 - ), "SwrObs data does not have totim + 1 observation" + assert ts.shape == (336,), "SwrObs length of data array does not equal (336,)" + assert len(ts.dtype.names) == 2, "SwrObs data does not have totim + 1 observation" # plt.plot(ts['totim'], ts['OBS5']) # plt.show() for idx in range(ntimes): d = sobj.get_data(idx=idx) - assert d.shape == ( - 1, - ), "SwrObs length of data array does not equal (1,)" - assert ( - len(d.dtype.names) == nobs + 1 - ), "SwrObs data does not have nobs + 1" + assert d.shape == (1,), "SwrObs length of data array does not equal (1,)" + assert len(d.dtype.names) == nobs + 1, "SwrObs data does not have nobs + 1" for time in times: d = sobj.get_data(totim=time) - assert d.shape == ( - 1, - ), "SwrObs length of data array does not equal (1,)" - assert ( - len(d.dtype.names) == nobs + 1 - ), "SwrObs data does not have nobs + 1" + assert d.shape == (1,), "SwrObs length of data array does not equal (1,)" + assert len(d.dtype.names) == nobs + 1, "SwrObs data does not have nobs + 1" # test get_dataframes() for idx in range(ntimes): diff --git a/autotest/test_usg.py b/autotest/test_usg.py index 969ca6041d..33817d2204 100644 --- a/autotest/test_usg.py +++ b/autotest/test_usg.py @@ -60,9 +60,7 @@ def test_usg_disu_load(function_tmpdir, mfusg_01A_nestedgrid_nognc_model_path): ): if isinstance(value1, (Util2d, Util3d)): assert np.array_equal(value1.array, value2.array) - elif isinstance( - value1, list - ): # this is for the jagged _get_neighbours list + elif isinstance(value1, list): # this is for the jagged _get_neighbours list assert np.all([np.all(v1 == v2) for v1, v2 in zip(value1, value2)]) elif not isinstance(value1, TemporalReference): assert value1 == value2 @@ -138,10 +136,7 @@ def test_usg_model(function_tmpdir): @requires_exe("mfusg") def 
test_usg_load_01B(function_tmpdir, mfusg_01A_nestedgrid_nognc_model_path): - print( - "testing 1-layer unstructured mfusg model " - "loading: 01A_nestedgrid_nognc.nam" - ) + print("testing 1-layer unstructured mfusg model loading: 01A_nestedgrid_nognc.nam") fname = mfusg_01A_nestedgrid_nognc_model_path / "flow.nam" assert os.path.isfile(fname), f"nam file not found {fname}" diff --git a/autotest/test_util_2d_and_3d.py b/autotest/test_util_2d_and_3d.py index 2080458033..9e72def24a 100644 --- a/autotest/test_util_2d_and_3d.py +++ b/autotest/test_util_2d_and_3d.py @@ -51,9 +51,7 @@ def test_transient3d(): # Make a transient 3d array with changing entries and then verify that # they can be reproduced through indexing - a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape( - (nlay, nrow, ncol) - ) + a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape((nlay, nrow, ncol)) t3d = {0: a, 2: 1025, 3: a, 4: 1000.0} t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, t3d, "fake") assert np.array_equal(t3d[0].array, a) @@ -178,9 +176,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol): files = os.listdir(ml.model_ws) print("\n\nexternal files: " + ",".join(files) + "\n\n") - ml1 = Modflow.load( - ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False - ) + ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False) print("testing load") assert not ml1.load_fail # check that both binary and cnstnt are being respected through @@ -198,9 +194,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol): else: files = os.listdir(ml.model_ws) print("\n\nexternal files: " + ",".join(files) + "\n\n") - ml1 = Modflow.load( - ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False - ) + ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False) print("testing load") assert not ml1.load_fail assert np.array_equal(ml1.lpf.vka.array, vk * 2.0) @@ -209,9 +203,7 @@ def stress_util2d(model_ws, ml, nlay, nrow, ncol): # more binary testing ml.lpf.vka[0]._array[0, 0] *= 3.0 ml.write_input() - ml1 = Modflow.load( - ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False - ) + ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False) assert np.array_equal(ml.lpf.vka.array, ml1.lpf.vka.array) assert np.array_equal(ml.lpf.hk.array, ml1.lpf.hk.array) @@ -236,9 +228,7 @@ def stress_util2d_for_joe_the_file_king(ml, nlay, nrow, ncol): assert np.array_equal(ml.lpf.hk.array, hk) assert np.array_equal(ml.lpf.vka.array, vk * 2.0) - ml1 = Modflow.load( - ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False - ) + ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False) print("testing load") assert not ml1.load_fail assert np.array_equal(ml1.lpf.vka.array, vk * 2.0) @@ -249,9 +239,7 @@ def stress_util2d_for_joe_the_file_king(ml, nlay, nrow, ncol): # more binary testing ml.lpf.vka[0]._array[0, 0] *= 3.0 ml.write_input() - ml1 = Modflow.load( - ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False - ) + ml1 = Modflow.load(ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False) assert np.array_equal(ml.lpf.vka.array, ml1.lpf.vka.array) assert np.array_equal(ml.lpf.hk.array, ml1.lpf.hk.array) @@ -433,9 +421,7 @@ def test_append_mflist(function_tmpdir): wel2 = ModflowWel(ml, stress_period_data=sp_data2) wel3 = ModflowWel( ml, - stress_period_data=wel2.stress_period_data.append( - wel1.stress_period_data - ), + 
stress_period_data=wel2.stress_period_data.append(wel1.stress_period_data), ) ml.write_input() diff --git a/autotest/test_util_geometry.py b/autotest/test_util_geometry.py index 053ef2f631..6105688859 100644 --- a/autotest/test_util_geometry.py +++ b/autotest/test_util_geometry.py @@ -78,12 +78,8 @@ def test_point_in_polygon_faces(): xpts_v, ypts_v = list(zip(*cell)) xpts_v = np.array([xpts_v]) ypts_v = np.array([ypts_v]) - xpts = np.array( - [[xpts_v[0, 0], xpts_v[0, 2], np.mean(xpts_v), np.mean(xpts_v)]] - ) - ypts = np.array( - [[np.mean(ypts_v), np.mean(ypts_v), ypts_v[0, 0], ypts_v[0, 2]]] - ) + xpts = np.array([[xpts_v[0, 0], xpts_v[0, 2], np.mean(xpts_v), np.mean(xpts_v)]]) + ypts = np.array([[np.mean(ypts_v), np.mean(ypts_v), ypts_v[0, 0], ypts_v[0, 2]]]) mask = point_in_polygon(xpts, ypts, cell) assert mask.sum() == 2 # only inner faces debug_plot(grid, cell, xpts, ypts, mask) diff --git a/autotest/test_uzf.py b/autotest/test_uzf.py index 9726163f02..d49a2b3a47 100644 --- a/autotest/test_uzf.py +++ b/autotest/test_uzf.py @@ -52,18 +52,10 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path): verbose=True, ) rm = [True if ".uz" in f else False for f in m.external_fnames] - m.external_fnames = [ - f for i, f in enumerate(m.external_fnames) if not rm[i] - ] - m.external_binflag = [ - f for i, f in enumerate(m.external_binflag) if not rm[i] - ] - m.external_output = [ - f for i, f in enumerate(m.external_output) if not rm[i] - ] - m.external_units = [ - f for i, f in enumerate(m.external_output) if not rm[i] - ] + m.external_fnames = [f for i, f in enumerate(m.external_fnames) if not rm[i]] + m.external_binflag = [f for i, f in enumerate(m.external_binflag) if not rm[i]] + m.external_output = [f for i, f in enumerate(m.external_output) if not rm[i]] + m.external_units = [f for i, f in enumerate(m.external_output) if not rm[i]] datpth = uzf_test_path irnbndpth = os.path.join(datpth, "irunbnd.dat") @@ -198,16 +190,11 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path): assert np.abs(np.sum(uzf.vks.array) / uzf.vks.cnstnt - 116.0) < 1e-5 assert uzf.eps._Util2d__value == 3.5 assert np.abs(uzf.thts._Util2d__value - 0.30) < 1e-5 - assert ( - np.abs(np.sum(uzf.extwc[0].array) / uzf.extwc[0].cnstnt - 176.0) < 1e4 - ) + assert np.abs(np.sum(uzf.extwc[0].array) / uzf.extwc[0].cnstnt - 176.0) < 1e4 for per in [0, 1]: assert np.abs(uzf.pet[per]._Util2d__value - 5e-8) < 1e-10 for per in range(m.nper): - assert ( - np.abs(np.sum(uzf.finf[per].array) / uzf.finf[per].cnstnt - 339.0) - < 1e4 - ) + assert np.abs(np.sum(uzf.finf[per].array) / uzf.finf[per].cnstnt - 339.0) < 1e4 assert True uzf.write_file() m2 = Modflow("UZFtest2_2", model_ws=ws) @@ -228,18 +215,13 @@ def test_create_uzf(function_tmpdir, mf2005_test_path, uzf_test_path): # load uzf test problem for nwt model with 'nwt_11_fmt'-style options and 'open/close' array types tpth = uzf_test_path / "load_uzf_for_nwt" - [ - shutil.copy(os.path.join(tpth, f), os.path.join(ws, f)) - for f in os.listdir(tpth) - ] + [shutil.copy(os.path.join(tpth, f), os.path.join(ws, f)) for f in os.listdir(tpth)] m3 = Modflow("UZFtest3", version="mfnwt", verbose=True) m3.model_ws = ws dis = ModflowDis.load(os.path.join(tpth, "UZFtest3.dis"), m3) uzf = ModflowUzf1.load(os.path.join(tpth, "UZFtest3.uzf"), m3) assert np.sum(uzf.iuzfbnd.array) == 28800 - assert np.isclose( - np.sum(uzf.finf.array) / uzf.finf[per].cnstnt, 13.7061, atol=1e-4 - ) + assert np.isclose(np.sum(uzf.finf.array) / uzf.finf[per].cnstnt, 13.7061, atol=1e-4) 
@requires_exe("mfnwt") @@ -301,18 +283,10 @@ def test_read_write_nwt_options(function_tmpdir): uzfopt.write_options(os.path.join(ws, "uzfopt.txt")) sfropt.write_options(os.path.join(ws, "sfropt.txt")) - welopt = OptionBlock.load_options( - os.path.join(ws, "welopt.txt"), ModflowWel - ) - welopt2 = OptionBlock.load_options( - os.path.join(ws, "welopt2.txt"), ModflowWel - ) - uzfopt = OptionBlock.load_options( - os.path.join(ws, "uzfopt.txt"), ModflowUzf1 - ) - sfropt = OptionBlock.load_options( - os.path.join(ws, "sfropt.txt"), ModflowSfr2 - ) + welopt = OptionBlock.load_options(os.path.join(ws, "welopt.txt"), ModflowWel) + welopt2 = OptionBlock.load_options(os.path.join(ws, "welopt2.txt"), ModflowWel) + uzfopt = OptionBlock.load_options(os.path.join(ws, "uzfopt.txt"), ModflowUzf1) + sfropt = OptionBlock.load_options(os.path.join(ws, "sfropt.txt"), ModflowSfr2) assert repr(welopt) == welstr assert repr(welopt2) == welstr2 @@ -473,9 +447,7 @@ def test_load_write_uzf_option_block(function_tmpdir, options_path): uzf2.write_file(os.path.join(function_tmpdir, uzf_name2)) ml.remove_package("UZF") - uzf3 = ModflowUzf1.load( - os.path.join(function_tmpdir, uzf_name2), ml, check=False - ) + uzf3 = ModflowUzf1.load(os.path.join(function_tmpdir, uzf_name2), ml, check=False) assert uzf3.options.smoothfact == 0.4 assert uzf3.smoothfact == 0.4 @@ -507,9 +479,7 @@ def test_load_write_uzf_option_line(function_tmpdir, options_path): uzf.write_file(os.path.join(function_tmpdir, uzf_name2)) ml.remove_package("UZF") - uzf2 = ModflowUzf1.load( - os.path.join(function_tmpdir, uzf_name2), ml, check=False - ) + uzf2 = ModflowUzf1.load(os.path.join(function_tmpdir, uzf_name2), ml, check=False) assert uzf2.nosurfleak assert uzf2.etsquare @@ -643,9 +613,7 @@ def test_uzf_negative_iuzfopt(function_tmpdir): success, buff = ml.run_model() assert success, "UZF model with -1 iuzfopt failed to run" - ml2 = Modflow.load( - "uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir - ) + ml2 = Modflow.load("uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir) np.testing.assert_array_equal( ml2.uzf.pet.array, np.full((2, 1, 10, 10), 0.1, np.float32) diff --git a/autotest/test_zonbud_utility.py b/autotest/test_zonbud_utility.py index 86991e6b26..50e3e2f17d 100644 --- a/autotest/test_zonbud_utility.py +++ b/autotest/test_zonbud_utility.py @@ -63,11 +63,7 @@ def read_zonebudget_file(fname): continue # Get mass-balance information for this block - elif ( - "Total" in items[0] - or "IN-OUT" in items[0] - or "Percent Error" in items[0] - ): + elif "Total" in items[0] or "IN-OUT" in items[0] or "Percent Error" in items[0]: continue # End of block @@ -147,9 +143,7 @@ def test_zonbud_aliases(cbc_f, zon_f): """ zon = ZoneBudget.read_zone_file(zon_f) aliases = {1: "Trey", 2: "Mike", 4: "Wilson", 0: "Carini"} - zb = ZoneBudget( - cbc_f, zon, kstpkper=(0, 1096), aliases=aliases, verbose=True - ) + zb = ZoneBudget(cbc_f, zon, kstpkper=(0, 1096), aliases=aliases, verbose=True) bud = zb.get_budget() assert bud[bud["name"] == "FROM_Mike"].shape[0] > 0, "No records returned." 
@@ -195,9 +189,7 @@ def test_zonbud_readwrite_zbarray(function_tmpdir): """ x = np.random.randint(100, 200, size=(5, 150, 200)) ZoneBudget.write_zone_file(function_tmpdir / "randint", x) - ZoneBudget.write_zone_file( - function_tmpdir / "randint", x, fmtin=35, iprn=2 - ) + ZoneBudget.write_zone_file(function_tmpdir / "randint", x, fmtin=35, iprn=2) z = ZoneBudget.read_zone_file(function_tmpdir / "randint") assert np.array_equal(x, z), "Input and output arrays do not match." diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index e2346e574f..ea98aae808 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -425,9 +425,7 @@ def cell_thickness(self): def thick(self): """Raises AttributeError, use :meth:`cell_thickness`.""" # DEPRECATED since version 3.4.0 - raise AttributeError( - "'thick' has been removed; use 'cell_thickness()'" - ) + raise AttributeError("'thick' has been removed; use 'cell_thickness()'") def saturated_thickness(self, array, mask=None): """ @@ -563,8 +561,7 @@ def zcellcenters(self): @property def xyzcellcenters(self): raise NotImplementedError( - "must define get_cellcenters in child " - "class to use this base class" + "must define get_cellcenters in child class to use this base class" ) @property @@ -626,9 +623,7 @@ def convert_grid(self, factor): ------- Grid object """ - raise NotImplementedError( - "convert_grid must be defined in the child class" - ) + raise NotImplementedError("convert_grid must be defined in the child class") def _set_neighbors(self, reset=False, method="rook"): """ @@ -686,9 +681,7 @@ def _set_neighbors(self, reset=False, method="rook"): pass # convert use dict to create a set that preserves insertion order - self._neighbors = { - i: list(dict.fromkeys(v)) for i, v in neighbors.items() - } + self._neighbors = {i: list(dict.fromkeys(v)) for i, v in neighbors.items()} self._edge_set = edge_set def neighbors(self, node=None, **kwargs): @@ -939,9 +932,7 @@ def get_coords(self, x, y): x += self._xoff y += self._yoff - return geometry.rotate( - x, y, self._xoff, self._yoff, self.angrot_radians - ) + return geometry.rotate(x, y, self._xoff, self._yoff, self.angrot_radians) def get_local_coords(self, x, y): """ @@ -1218,9 +1209,7 @@ def _zcoords(self): if self.top is not None and self.botm is not None: zcenters = [] top_3d = np.expand_dims(self.top, 0) - zbdryelevs = np.concatenate( - (top_3d, np.atleast_2d(self.botm)), axis=0 - ) + zbdryelevs = np.concatenate((top_3d, np.atleast_2d(self.botm)), axis=0) for ix in range(1, len(zbdryelevs)): zcenters.append((zbdryelevs[ix - 1] + zbdryelevs[ix]) / 2.0) @@ -1230,9 +1219,7 @@ def _zcoords(self): return zbdryelevs, zcenters # Exporting - def write_shapefile( - self, filename="grid.shp", crs=None, prjfile=None, **kwargs - ): + def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs): """ Write a shapefile of the grid with just the row and column attributes. 
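For context on the write_shapefile signature rewrapped just above, a minimal usage sketch; the grid dimensions, output name, and EPSG code are arbitrary examples, and the call assumes the optional pyshp and pyproj dependencies are installed.

import numpy as np
from flopy.discretization import StructuredGrid

# Build a small, complete structured grid (1 layer, 5 rows, 10 columns)
# and export just its row/column geometry to a shapefile.
grid = StructuredGrid(
    delr=np.full(10, 100.0),
    delc=np.full(5, 100.0),
    top=np.zeros((5, 10)),
    botm=np.full((1, 5, 10), -10.0),
)
grid.write_shapefile(filename="grid.shp", crs="EPSG:26915")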
@@ -1262,6 +1249,4 @@ def write_shapefile( # initialize grid from a grb file @classmethod def from_binary_grid_file(cls, file_path, verbose=False): - raise NotImplementedError( - "must define from_binary_grid_file in child class" - ) + raise NotImplementedError("must define from_binary_grid_file in child class") diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 59834b3826..f01184960f 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -222,11 +222,7 @@ def is_valid(self): @property def is_complete(self): - if ( - self.__delc is not None - and self.__delr is not None - and super().is_complete - ): + if self.__delc is not None and self.__delr is not None and super().is_complete: return True return False @@ -353,9 +349,7 @@ def xyzvertices(self): pass xgrid, ygrid = self.get_coords(xgrid, ygrid) if zgrid is not None: - self._cache_dict[cache_index] = CachedData( - [xgrid, ygrid, zgrid] - ) + self._cache_dict[cache_index] = CachedData([xgrid, ygrid, zgrid]) else: self._cache_dict[cache_index] = CachedData([xgrid, ygrid]) @@ -397,9 +391,7 @@ def zedges(self): cache_index not in self._cache_dict or self._cache_dict[cache_index].out_of_date ): - zedges = np.concatenate( - (np.array([self.top[0, 0]]), self.botm[:, 0, 0]) - ) + zedges = np.concatenate((np.array([self.top[0, 0]]), self.botm[:, 0, 0])) self._cache_dict[cache_index] = CachedData(zedges) if self._copy_cache: return self._cache_dict[cache_index].data @@ -480,9 +472,7 @@ def xyzcellcenters(self): if np.any(quasi3d): ibs[1:] = ibs[1:] + np.cumsum(quasi3d)[: self.__nlay - 1] for l, ib in enumerate(ibs[1:], 1): - z[l, :, :] = ( - self._botm[ib - 1, :, :] + self._botm[ib, :, :] - ) / 2.0 + z[l, :, :] = (self._botm[ib - 1, :, :] + self._botm[ib, :, :]) / 2.0 else: z = None if self._has_ref_coordinates: @@ -531,9 +521,7 @@ def grid_lines(self): if self._has_ref_coordinates: lines_trans = [] for ln in lines: - lines_trans.append( - [self.get_coords(*ln[0]), self.get_coords(*ln[1])] - ) + lines_trans.append([self.get_coords(*ln[0]), self.get_coords(*ln[1])]) return lines_trans return lines @@ -597,15 +585,15 @@ def is_regular_z(self): rel_tol = 1.0e-5 # regularity test in z direction - rel_diff_thick0 = ( - self.delz[0, :, :] - self.delz[0, 0, 0] - ) / self.delz[0, 0, 0] + rel_diff_thick0 = (self.delz[0, :, :] - self.delz[0, 0, 0]) / self.delz[ + 0, 0, 0 + ] failed = np.abs(rel_diff_thick0) > rel_tol is_regular_z = np.count_nonzero(failed) == 0 for k in range(1, self.nlay): - rel_diff_zk = ( - self.delz[k, :, :] - self.delz[0, :, :] - ) / self.delz[0, :, :] + rel_diff_zk = (self.delz[k, :, :] - self.delz[0, :, :]) / self.delz[ + 0, :, : + ] failed = np.abs(rel_diff_zk) > rel_tol is_regular_z = is_regular_z and np.count_nonzero(failed) == 0 @@ -633,9 +621,7 @@ def is_regular_xy(self): first_equal = np.abs(rel_diff_0) <= rel_tol # combine with regularity tests in x and z directions - is_regular_xy = ( - first_equal and self.is_regular_x and self.is_regular_y - ) + is_regular_xy = first_equal and self.is_regular_x and self.is_regular_y self._cache_dict[cache_index] = CachedData(is_regular_xy) if self._copy_cache: @@ -661,9 +647,7 @@ def is_regular_xz(self): first_equal = np.abs(rel_diff_0) <= rel_tol # combine with regularity tests in x and z directions - is_regular_xz = ( - first_equal and self.is_regular_x and self.is_regular_z - ) + is_regular_xz = first_equal and self.is_regular_x and self.is_regular_z self._cache_dict[cache_index] = 
CachedData(is_regular_xz) if self._copy_cache: @@ -689,9 +673,7 @@ def is_regular_yz(self): first_equal = np.abs(rel_diff_0) <= rel_tol # combine with regularity tests in x and y directions - is_regular_yz = ( - first_equal and self.is_regular_y and self.is_regular_z - ) + is_regular_yz = first_equal and self.is_regular_y and self.is_regular_z self._cache_dict[cache_index] = CachedData(is_regular_yz) if self._copy_cache: @@ -717,9 +699,7 @@ def is_regular(self): first_equal = np.abs(rel_diff_0) <= rel_tol # combine with regularity tests in x, y and z directions - is_regular = ( - first_equal and self.is_regular_z and self.is_regular_xy - ) + is_regular = first_equal and self.is_regular_z and self.is_regular_xy self._cache_dict[cache_index] = CachedData(is_regular) if self._copy_cache: @@ -744,9 +724,9 @@ def is_rectilinear(self): # rectilinearity test in z direction is_rect_z = True for k in range(self.nlay): - rel_diff_zk = ( - self.delz[k, :, :] - self.delz[k, 0, 0] - ) / self.delz[k, 0, 0] + rel_diff_zk = (self.delz[k, :, :] - self.delz[k, 0, 0]) / self.delz[ + k, 0, 0 + ] failed = np.abs(rel_diff_zk) > rel_tol is_rect_z = is_rect_z and np.count_nonzero(failed) == 0 @@ -815,9 +795,7 @@ def convert_grid(self, factor): angrot=self.angrot, ) else: - raise AssertionError( - "Grid is not complete and cannot be converted" - ) + raise AssertionError("Grid is not complete and cannot be converted") ############### ### Methods ### @@ -930,9 +908,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): if forgive: col = np.nan else: - raise Exception( - "x, y point given is outside of the model area" - ) + raise Exception("x, y point given is outside of the model area") else: col = np.asarray(xcomp).nonzero()[0][-1] @@ -941,9 +917,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): if forgive: row = np.nan else: - raise Exception( - "x, y point given is outside of the model area" - ) + raise Exception("x, y point given is outside of the model area") else: row = np.asarray(ycomp).nonzero()[0][-1] if np.any(np.isnan([row, col])): @@ -1022,9 +996,7 @@ def get_cell_vertices(self, *args, **kwargs): """ if kwargs: if args: - raise TypeError( - "mixed positional and keyword arguments not supported" - ) + raise TypeError("mixed positional and keyword arguments not supported") elif "node" in kwargs: _, i, j = self.get_lrc(kwargs.pop("node"))[0] elif "i" in kwargs and "j" in kwargs: @@ -1229,10 +1201,7 @@ def array_at_verts(self, a): zcenters = self.zcellcenters if self._idomain is not None: zcenters = np.where(inactive, np.nan, zcenters) - if ( - not self.is_rectilinear - or np.count_nonzero(np.isnan(zcenters)) != 0 - ): + if not self.is_rectilinear or np.count_nonzero(np.isnan(zcenters)) != 0: zedges = np.nanmean(self.top_botm_withnan, axis=(1, 2)) else: zedges = self.top_botm_withnan[:, 0, 0] @@ -1289,9 +1258,7 @@ def array_at_verts(self, a): xyoutput[:, 0] = youtput[0, :, :].ravel() xyoutput[:, 1] = xoutput[0, :, :].ravel() averts2d = interp_func(xyoutput) - averts2d = averts2d.reshape( - (1, self.nrow + 1, self.ncol + 1) - ) + averts2d = averts2d.reshape((1, self.nrow + 1, self.ncol + 1)) averts = averts2d * np.ones(shape_verts) elif self.nrow == 1: # in this case we need a 2d interpolation in the x, z plane @@ -1307,9 +1274,7 @@ def array_at_verts(self, a): xzoutput[:, 0] = zoutput[:, 0, :].ravel() xzoutput[:, 1] = xoutput[:, 0, :].ravel() averts2d = interp_func(xzoutput) - averts2d = averts2d.reshape( - (self.nlay + 1, 1, self.ncol + 1) - ) + averts2d = 
averts2d.reshape((self.nlay + 1, 1, self.ncol + 1)) averts = averts2d * np.ones(shape_verts) elif self.ncol == 1: # in this case we need a 2d interpolation in the y, z plane @@ -1325,9 +1290,7 @@ def array_at_verts(self, a): yzoutput[:, 0] = zoutput[:, :, 0].ravel() yzoutput[:, 1] = youtput[:, :, 0].ravel() averts2d = interp_func(yzoutput) - averts2d = averts2d.reshape( - (self.nlay + 1, self.nrow + 1, 1) - ) + averts2d = averts2d.reshape((self.nlay + 1, self.nrow + 1, 1)) averts = averts2d * np.ones(shape_verts) else: # 3d interpolation diff --git a/flopy/discretization/unstructuredgrid.py b/flopy/discretization/unstructuredgrid.py index ed0a201930..18d7b1ce97 100644 --- a/flopy/discretization/unstructuredgrid.py +++ b/flopy/discretization/unstructuredgrid.py @@ -273,18 +273,14 @@ def cell2d(self): @property def iverts(self): if self._iverts is not None: - return [ - [ivt for ivt in t if ivt is not None] for t in self._iverts - ] + return [[ivt for ivt in t if ivt is not None] for t in self._iverts] @property def verts(self): if self._vertices is None: return self._vertices else: - verts = np.array( - [list(t)[1:] for t in self._vertices], dtype=float - ).T + verts = np.array([list(t)[1:] for t in self._vertices], dtype=float).T x, y = transform( verts[0], verts[1], @@ -578,8 +574,7 @@ def map_polygons(self): self._polygons[ilay].append(p) else: self._polygons = [ - Path(self.get_cell_vertices(nn)) - for nn in range(self.ncpl[0]) + Path(self.get_cell_vertices(nn)) for nn in range(self.ncpl[0]) ] return copy.copy(self._polygons) @@ -650,10 +645,7 @@ def convert_grid(self, factor): """ if self.is_complete: return UnstructuredGrid( - vertices=[ - [i[0], i[1] * factor, i[2] * factor] - for i in self._vertices - ], + vertices=[[i[0], i[1] * factor, i[2] * factor] for i in self._vertices], iverts=self._iverts, xcenters=self._xc * factor, ycenters=self._yc * factor, @@ -665,9 +657,7 @@ def convert_grid(self, factor): angrot=self.angrot, ) else: - raise AssertionError( - "Grid is not complete and cannot be converted" - ) + raise AssertionError("Grid is not complete and cannot be converted") def clean_iverts(self, inplace=False): """ @@ -877,9 +867,7 @@ def _build_grid_geometry_info(self): xvertices = xvertxform yvertices = yvertxform - self._cache_dict[cache_index_cc] = CachedData( - [xcenters, ycenters, zcenters] - ) + self._cache_dict[cache_index_cc] = CachedData([xcenters, ycenters, zcenters]) self._cache_dict[cache_index_vert] = CachedData( [xvertices, yvertices, zvertices] ) @@ -1149,9 +1137,7 @@ def from_gridspec(cls, file_path: Union[str, os.PathLike]): with open(file_path) as file: def split_line(): - return [ - head.upper() for head in file.readline().strip().split() - ] + return [head.upper() for head in file.readline().strip().split()] header = split_line() while header[0][0] == "#": @@ -1194,13 +1180,8 @@ def split_line(): f"Cell {nn} declares {verts_declared} vertices but provides {verts_provided}" ) - verts = [ - int(vert) - 1 for vert in line[6 : 6 + verts_declared] - ] - elevs = [ - zverts[int(line[i]) - 1] - for i in range(6, 6 + verts_declared) - ] + verts = [int(vert) - 1 for vert in line[6 : 6 + verts_declared]] + elevs = [zverts[int(line[i]) - 1] for i in range(6, 6 + verts_declared)] xcenters.append(xc) ycenters.append(yc) diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index c45c4e245e..f55d8a306a 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -167,16 +167,12 @@ def iverts(self): @property def 
cell1d(self): if self._cell1d is not None: - return [ - [ivt for ivt in t if ivt is not None] for t in self._cell1d - ] + return [[ivt for ivt in t if ivt is not None] for t in self._cell1d] @property def cell2d(self): if self._cell2d is not None: - return [ - [ivt for ivt in t if ivt is not None] for t in self._cell2d - ] + return [[ivt for ivt in t if ivt is not None] for t in self._cell2d] @property def verts(self): @@ -241,9 +237,7 @@ def grid_lines(self): ] ) if close_cell: - lines.append( - [(xcoords[-1], ycoords[-1]), (xcoords[0], ycoords[0])] - ) + lines.append([(xcoords[-1], ycoords[-1]), (xcoords[0], ycoords[0])]) self._copy_cache = True return lines @@ -336,13 +330,9 @@ def convert_grid(self, factor): """ if self.is_complete: return VertexGrid( - vertices=[ - [i[0], i[1] * factor, i[2] * factor] - for i in self._vertices - ], + vertices=[[i[0], i[1] * factor, i[2] * factor] for i in self._vertices], cell2d=[ - [i[0], i[1] * factor, i[2] * factor] + i[3:] - for i in self._cell2d + [i[0], i[1] * factor, i[2] * factor] + i[3:] for i in self._cell2d ], top=self.top * factor, botm=self.botm * factor, @@ -352,9 +342,7 @@ def convert_grid(self, factor): angrot=self.angrot, ) else: - raise AssertionError( - "Grid is not complete and cannot be converted" - ) + raise AssertionError("Grid is not complete and cannot be converted") def intersect(self, x, y, z=None, local=False, forgive=False): """ diff --git a/flopy/export/metadata.py b/flopy/export/metadata.py index edb47af451..6c80943309 100644 --- a/flopy/export/metadata.py +++ b/flopy/export/metadata.py @@ -64,20 +64,14 @@ def __init__(self, sciencebase_id, model): self.naming_authority = "ScienceBase" # org. that provides the id # Well-behaved generic netCDF applications should append a line containing: # date, time of day, user name, program name and command arguments. - self.source = ( - model.model_ws - ) # The method of production of the original data. + self.source = model.model_ws # The method of production of the original data. # If it was model-generated, source should name the model and its version. # This attribute is defined in the CF Conventions. 
self.acknowledgement = self._get_xml_attribute("datacred") - self.date_created = self.sb["provenance"]["linkProcess"].get( - "dateCreated" - ) + self.date_created = self.sb["provenance"]["linkProcess"].get("dateCreated") self.creator_name = self.creator.get("name") self.creator_email = self.creator.get("email") - self.creator_institution = self.creator["organization"].get( - "displayText" - ) + self.creator_institution = self.creator["organization"].get("displayText") self.institution = ( self.creator_institution ) # also in CF convention for global attributes @@ -87,9 +81,7 @@ def __init__(self, sciencebase_id, model): for d in self.sb["contacts"] if "publisher" in d.get("type").lower() ][0] - self.publisher_email = self.sb["provenance"]["linkProcess"].get( - "processedBy" - ) + self.publisher_email = self.sb["provenance"]["linkProcess"].get("processedBy") self.publisher_url = "https://www2.usgs.gov/water/" # self.sb['provenance']['linkProcess'].get('linkReference') self.geospatial_bounds_crs = "EPSG:4326" self.geospatial_lat_min = self.bounds.get("minY") @@ -122,9 +114,7 @@ def bounds(self): @property def creator(self): return [ - d - for d in self.sb["contacts"] - if "point of contact" in d["type"].lower() + d for d in self.sb["contacts"] if "point of contact" in d["type"].lower() ][0] @property @@ -172,9 +162,7 @@ def references(self): """ r = [self.citation] links = [ - d.get("uri") - for d in self.sb["webLinks"] - if "link" in d.get("type").lower() + d.get("uri") for d in self.sb["webLinks"] if "link" in d.get("type").lower() ] return r + links @@ -190,9 +178,7 @@ def time_coverage(self): l = self.sb["dates"] tc = {} for t in ["start", "end"]: - tc[t] = [d.get("dateString") for d in l if t in d["type"].lower()][ - 0 - ] + tc[t] = [d.get("dateString") for d in l if t in d["type"].lower()][0] if not np.all(self.model_time.steady_state) and pd is not None: # replace with times from model reference tc["start"] = self.model_time.start_datetime diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py index f93469f4e2..54a168478d 100644 --- a/flopy/export/netcdf.py +++ b/flopy/export/netcdf.py @@ -188,9 +188,7 @@ def __init__( self.dimension_names = ("layer", "y", "x") STANDARD_VARS.extend(["delc", "delr"]) else: - raise Exception( - f"Grid type {self.model_grid.grid_type} not supported." - ) + raise Exception(f"Grid type {self.model_grid.grid_type} not supported.") self.shape = self.model_grid.shape parser = import_optional_dependency("dateutil.parser") @@ -201,9 +199,7 @@ def __init__( crs = get_authority_crs(self.model_grid.crs) if crs is None: - self.logger.warn( - "model has no coordinate reference system specified. " - ) + self.logger.warn("model has no coordinate reference system specified. 
") self.model_crs = crs self.transformer = None self.grid_units = self.model_grid.units @@ -247,9 +243,7 @@ def __init__( } for n, v in spatial_attribs.items(): self.global_attributes["flopy_sr_" + n] = v - self.global_attributes["start_datetime"] = ( - self.model_time.start_datetime - ) + self.global_attributes["start_datetime"] = self.model_time.start_datetime self.fillvalue = FILLVALUE @@ -279,18 +273,14 @@ def __add__(self, other): new_net = NetCdf.zeros_like(self) if np.isscalar(other) or isinstance(other, np.ndarray): for vname in self.var_attr_dict.keys(): - new_net.nc.variables[vname][:] = ( - self.nc.variables[vname][:] + other - ) + new_net.nc.variables[vname][:] = self.nc.variables[vname][:] + other elif isinstance(other, NetCdf): for vname in self.var_attr_dict.keys(): new_net.nc.variables[vname][:] = ( self.nc.variables[vname][:] + other.nc.variables[vname][:] ) else: - raise Exception( - f"NetCdf.__add__(): unrecognized other:{type(other)}" - ) + raise Exception(f"NetCdf.__add__(): unrecognized other:{type(other)}") new_net.nc.sync() return new_net @@ -298,18 +288,14 @@ def __sub__(self, other): new_net = NetCdf.zeros_like(self) if np.isscalar(other) or isinstance(other, np.ndarray): for vname in self.var_attr_dict.keys(): - new_net.nc.variables[vname][:] = ( - self.nc.variables[vname][:] - other - ) + new_net.nc.variables[vname][:] = self.nc.variables[vname][:] - other elif isinstance(other, NetCdf): for vname in self.var_attr_dict.keys(): new_net.nc.variables[vname][:] = ( self.nc.variables[vname][:] - other.nc.variables[vname][:] ) else: - raise Exception( - f"NetCdf.__sub__(): unrecognized other:{type(other)}" - ) + raise Exception(f"NetCdf.__sub__(): unrecognized other:{type(other)}") new_net.nc.sync() return new_net @@ -317,18 +303,14 @@ def __mul__(self, other): new_net = NetCdf.zeros_like(self) if np.isscalar(other) or isinstance(other, np.ndarray): for vname in self.var_attr_dict.keys(): - new_net.nc.variables[vname][:] = ( - self.nc.variables[vname][:] * other - ) + new_net.nc.variables[vname][:] = self.nc.variables[vname][:] * other elif isinstance(other, NetCdf): for vname in self.var_attr_dict.keys(): new_net.nc.variables[vname][:] = ( self.nc.variables[vname][:] * other.nc.variables[vname][:] ) else: - raise Exception( - f"NetCdf.__mul__(): unrecognized other:{type(other)}" - ) + raise Exception(f"NetCdf.__mul__(): unrecognized other:{type(other)}") new_net.nc.sync() return new_net @@ -340,19 +322,14 @@ def __truediv__(self, other): with np.errstate(invalid="ignore"): if np.isscalar(other) or isinstance(other, np.ndarray): for vname in self.var_attr_dict.keys(): - new_net.nc.variables[vname][:] = ( - self.nc.variables[vname][:] / other - ) + new_net.nc.variables[vname][:] = self.nc.variables[vname][:] / other elif isinstance(other, NetCdf): for vname in self.var_attr_dict.keys(): new_net.nc.variables[vname][:] = ( - self.nc.variables[vname][:] - / other.nc.variables[vname][:] + self.nc.variables[vname][:] / other.nc.variables[vname][:] ) else: - raise Exception( - f"NetCdf.__sub__(): unrecognized other:{type(other)}" - ) + raise Exception(f"NetCdf.__sub__(): unrecognized other:{type(other)}") new_net.nc.sync() return new_net @@ -420,9 +397,7 @@ def nc_crs(self): return get_authority_crs(self.nc_crs_str) @classmethod - def zeros_like( - cls, other, output_filename=None, verbose=None, logger=None - ): + def zeros_like(cls, other, output_filename=None, verbose=None, logger=None): new_net = NetCdf.empty_like( other, output_filename=output_filename, @@ -432,9 +407,7 
@@ def zeros_like( # add the vars to the instance for vname in other.var_attr_dict.keys(): if new_net.nc.variables.get(vname) is not None: - new_net.logger.warn( - f"variable {vname} already defined, skipping" - ) + new_net.logger.warn(f"variable {vname} already defined, skipping") continue new_net.log(f"adding variable {vname}") var = other.nc.variables[vname] @@ -463,19 +436,13 @@ def zeros_like( return new_net @classmethod - def empty_like( - cls, other, output_filename=None, verbose=None, logger=None - ): + def empty_like(cls, other, output_filename=None, verbose=None, logger=None): if output_filename is None: - output_filename = ( - str(time.mktime(datetime.now().timetuple())) + ".nc" - ) + output_filename = str(time.mktime(datetime.now().timetuple())) + ".nc" while os.path.exists(output_filename): print(f"{output_filename}...already exists") - output_filename = ( - str(time.mktime(datetime.now().timetuple())) + ".nc" - ) + output_filename = str(time.mktime(datetime.now().timetuple())) + ".nc" print("creating temporary netcdf file..." + output_filename) new_net = cls( @@ -487,9 +454,7 @@ def empty_like( ) return new_net - def difference( - self, other, minuend="self", mask_zero_diff=True, onlydiff=True - ): + def difference(self, other, minuend="self", mask_zero_diff=True, onlydiff=True): """ make a new NetCDF instance that is the difference with another netcdf file @@ -540,8 +505,7 @@ def difference( diff = self_vars.symmetric_difference(other_vars) if len(diff) > 0: self.logger.warn( - "variables are not the same between the two nc files: " - + ",".join(diff) + "variables are not the same between the two nc files: " + ",".join(diff) ) return @@ -607,9 +571,7 @@ def difference( # check for non-zero diffs if onlydiff and d_data.sum() == 0.0: - self.logger.warn( - f"var {vname} has zero differences, skipping..." - ) + self.logger.warn(f"var {vname} has zero differences, skipping...") continue self.logger.warn( @@ -645,9 +607,7 @@ def difference( def write(self): """write the nc object to disk""" self.log("writing nc file") - assert ( - self.nc is not None - ), "netcdf.write() error: nc file not initialized" + assert self.nc is not None, "netcdf.write() error: nc file not initialized" # write any new attributes that have been set since # initializing the file @@ -671,9 +631,7 @@ def initialize_geometry(self): # Check if using newer pyproj version conventions if version.parse(pyproj.__version__) < version.parse("2.2"): - raise ValueError( - "The FloPy NetCDF module requires pyproj >= 2.2.0." 
- ) + raise ValueError("The FloPy NetCDF module requires pyproj >= 2.2.0.") print("initialize_geometry::") @@ -705,9 +663,7 @@ def initialize_geometry(self): self.xs, self.ys = self.transformer.transform(xs, ys) # get transformed bounds and record to check against ScienceBase later - bbox = np.array( - [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]] - ) + bbox = np.array([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]) x, y = self.transformer.transform(*bbox.transpose()) self.bounds = x.min(), y.min(), x.max(), y.max() else: @@ -1010,9 +966,7 @@ def initialize_group( f"{dim} information must be supplied to dimension data" ) else: - self.nc.groups[group].createDimension( - dim, len(dimension_data[dim]) - ) + self.nc.groups[group].createDimension(dim, len(dimension_data[dim])) self.log(f"created {group} group dimensions") @@ -1020,9 +974,7 @@ def initialize_group( for dim in dimensions: if dim.lower() == "time": if "time" not in attributes: - unit_value = ( - f"{self.time_units} since {self.start_datetime}" - ) + unit_value = f"{self.time_units} since {self.start_datetime}" attribs = { "units": unit_value, "standard_name": "time", @@ -1116,22 +1068,15 @@ def create_group_variable( """ name = self.normalize_name(name) - if ( - name in STANDARD_VARS - and name in self.nc.groups[group].variables.keys() - ): + if name in STANDARD_VARS and name in self.nc.groups[group].variables.keys(): return if name in self.nc.groups[group].variables.keys(): if self.forgive: - self.logger.warn( - f"skipping duplicate {group} group variable: {name}" - ) + self.logger.warn(f"skipping duplicate {group} group variable: {name}") return else: - raise Exception( - f"duplicate {group} group variable name: {name}" - ) + raise Exception(f"duplicate {group} group variable name: {name}") self.log(f"creating group {group} variable: {name}") @@ -1213,10 +1158,7 @@ def create_variable( # long_name = attributes.pop("long_name",name) if name in STANDARD_VARS and name in self.nc.variables.keys(): return - if ( - name not in self.var_attr_dict.keys() - and name in self.nc.variables.keys() - ): + if name not in self.var_attr_dict.keys() and name in self.nc.variables.keys(): if self.forgive: self.logger.warn(f"skipping duplicate variable: {name}") return diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py index cf976871a3..bef3bbcb65 100644 --- a/flopy/export/shapefile_utils.py +++ b/flopy/export/shapefile_utils.py @@ -114,9 +114,7 @@ def write_grid_shapefile( ) elif mg.grid_type == "structured": verts = [ - mg.get_cell_vertices(i, j) - for i in range(mg.nrow) - for j in range(mg.ncol) + mg.get_cell_vertices(i, j) for i in range(mg.nrow) for j in range(mg.ncol) ] elif mg.grid_type == "vertex": verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)] @@ -184,9 +182,7 @@ def write_grid_shapefile( istart, istop = mg.get_layer_node_range(ilay) layer[istart:istop] = ilay + 1 at = np.vstack( - [node] - + [layer] - + [array_dict[name].ravel() for name in names[2:]] + [node] + [layer] + [array_dict[name].ravel() for name in names[2:]] ).transpose() names = enforce_10ch_limit(names) @@ -197,9 +193,7 @@ def write_grid_shapefile( at = np.array([tuple(i) for i in at], dtype=dtypes) # write field information - fieldinfo = { - name: get_pyshp_field_info(dtype.name) for name, dtype in dtypes - } + fieldinfo = {name: get_pyshp_field_info(dtype.name) for name, dtype in dtypes} for n in names: w.field(n, *fieldinfo[n]) @@ -308,11 +302,7 @@ def model_attributes_to_shapefile( 
attrs.remove("start_datetime") for attr in attrs: a = pak.__getattribute__(attr) - if ( - a is None - or not hasattr(a, "data_type") - or a.name == "thickness" - ): + if a is None or not hasattr(a, "data_type") or a.name == "thickness": continue if a.data_type == DataType.array2d: if a.array is None or a.array.shape != horz_shape: @@ -421,9 +411,7 @@ def model_attributes_to_shapefile( ): for ilay in range(a.model.modelgrid.nlay): u2d = a[ilay] - name = ( - f"{shape_attr_name(u2d.name)}_{ilay + 1}" - ) + name = f"{shape_attr_name(u2d.name)}_{ilay + 1}" arr = u2d.array assert arr.shape == horz_shape array_dict[name] = arr @@ -567,14 +555,10 @@ def shp2recarray(shpname: Union[str, os.PathLike]): sf = import_optional_dependency("shapefile") sfobj = sf.Reader(str(shpname)) - dtype = [ - (str(f[0]), get_pyshp_field_dtypes(f[1])) for f in sfobj.fields[1:] - ] + dtype = [(str(f[0]), get_pyshp_field_dtypes(f[1])) for f in sfobj.fields[1:]] geoms = GeoSpatialCollection(sfobj).flopy_geometry - records = [ - tuple(r) + (geoms[i],) for i, r in enumerate(sfobj.iterRecords()) - ] + records = [tuple(r) + (geoms[i],) for i, r in enumerate(sfobj.iterRecords())] dtype += [("geometry", object)] recarray = np.array(records, dtype=dtype).view(np.recarray) @@ -636,9 +620,7 @@ def recarray2shp( from ..utils.geospatial_utils import GeoSpatialCollection if len(recarray) != len(geoms): - raise IndexError( - "Number of geometries must equal the number of records!" - ) + raise IndexError("Number of geometries must equal the number of records!") if len(recarray) == 0: raise Exception("Recarray is empty") diff --git a/flopy/export/utils.py b/flopy/export/utils.py index 349dc0d6a3..9aef5ff987 100644 --- a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -47,9 +47,7 @@ def ensemble_helper( """ f_in, f_out = None, None for m in models[1:]: - assert ( - m.get_nrow_ncol_nlay_nper() == models[0].get_nrow_ncol_nlay_nper() - ) + assert m.get_nrow_ncol_nlay_nper() == models[0].get_nrow_ncol_nlay_nper() if inputs_filename is not None: f_in = models[0].export(inputs_filename, **kwargs) vdict = {} @@ -129,9 +127,7 @@ def ensemble_helper( if i >= 2: if not add_reals: f_out.write() - f_out = NetCdf.empty_like( - mean, output_filename=outputs_filename - ) + f_out = NetCdf.empty_like(mean, output_filename=outputs_filename) f_out.append(mean, suffix="**mean**") f_out.append(stdev, suffix="**stdev**") @@ -156,9 +152,7 @@ def _add_output_nc_variable( if logger: logger.log(f"creating array for {var_name}") - array = np.zeros( - (len(times), shape3d[0], shape3d[1], shape3d[2]), dtype=np.float32 - ) + array = np.zeros((len(times), shape3d[0], shape3d[1], shape3d[2]), dtype=np.float32) array[:] = np.nan if isinstance(out_obj, ZBNetOutput): @@ -405,12 +399,8 @@ def output_helper( elif verbose: print(msg) times = [t for t in common_times[::stride]] - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": - f = NetCdf( - f, ml, time_values=times, logger=logger, forgive=forgive, **kwargs - ) + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": + f = NetCdf(f, ml, time_values=times, logger=logger, forgive=forgive, **kwargs) elif isinstance(f, NetCdf): otimes = list(f.nc.variables["time"][:]) assert otimes == times @@ -500,9 +490,7 @@ def output_helper( pass for text, array in zonebud.arrays.items(): - _add_output_nc_zonebudget_variable( - f, array, text, zonebud.flux, logger - ) + _add_output_nc_zonebudget_variable(f, array, text, zonebud.flux, logger) # write the zone array 
to standard output _add_output_nc_variable( @@ -530,9 +518,7 @@ def output_helper( attrib_name = "conc" else: attrib_name = "head" - plotarray = np.atleast_3d( - out_obj.get_alldata().transpose() - ).transpose() + plotarray = np.atleast_3d(out_obj.get_alldata().transpose()).transpose() for per in range(plotarray.shape[0]): for k in range(plotarray.shape[1]): @@ -581,9 +567,7 @@ def output_helper( return f -def model_export( - f: Union[str, os.PathLike, NetCdf, dict], ml, fmt=None, **kwargs -): +def model_export(f: Union[str, os.PathLike, NetCdf, dict], ml, fmt=None, **kwargs): """ Method to export a model to a shapefile or netcdf file @@ -616,14 +600,10 @@ def model_export( if package_names is None: package_names = [pak.name[0] for pak in ml.packagelist] - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, ml, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": shapefile_utils.model_attributes_to_shapefile( f, ml, package_names=package_names, **kwargs ) @@ -661,9 +641,7 @@ def model_export( smooth=smooth, point_scalars=point_scalars, ) - vtkobj.add_model( - ml, masked_values=masked_values, selpaklist=package_names - ) + vtkobj.add_model(ml, masked_values=masked_values, selpaklist=package_names) vtkobj.write(os.path.join(f, name), kpers) else: @@ -710,14 +688,10 @@ def package_export( """ assert isinstance(pak, PackageInterface) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, pak.parent, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": shapefile_utils.model_attributes_to_shapefile( f, pak.parent, package_names=pak.name, verbose=verbose, **kwargs ) @@ -808,9 +782,7 @@ def generic_array_export( flopy model object """ - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": assert "model" in kwargs.keys(), ( "creating a new netCDF using generic_array_helper requires a " "'model' kwarg" @@ -833,8 +805,7 @@ def generic_array_export( long_name = kwargs.pop("long_name", var_name) if len(kwargs) > 0: f.logger.warn( - "generic_array_helper(): unrecognized kwargs:" - + ",".join(kwargs.keys()) + "generic_array_helper(): unrecognized kwargs:" + ",".join(kwargs.keys()) ) attribs = {"long_name": long_name} attribs["coordinates"] = coords @@ -887,24 +858,17 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): """ if not isinstance(mfl, (DataListInterface, DataInterface)): - err = ( - "mflist_helper only helps instances that support " - "DataListInterface" - ) + err = "mflist_helper only helps instances that support DataListInterface" raise AssertionError(err) modelgrid = mfl.model.modelgrid if "modelgrid" in kwargs: modelgrid = kwargs.pop("modelgrid") - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, mfl.model, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and 
Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": sparse = kwargs.get("sparse", False) kper = kwargs.get("kper", 0) squeeze = kwargs.get("squeeze", True) @@ -968,8 +932,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): break # Skip mflist if all elements are of object type if all( - dtype == np.object_ - for dtype, _ in mfl.data[kper].dtype.fields.values() + dtype == np.object_ for dtype, _ in mfl.data[kper].dtype.fields.values() ): return f @@ -982,9 +945,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): units = None if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format( - f.grid_units, f.time_units - ) + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units) precision_str = NC_PRECISION_TYPE[mfl.dtype[name].type] if var_name in NC_LONG_NAMES: attribs = {"long_name": NC_LONG_NAMES[var_name]} @@ -1046,10 +1007,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): """ if not isinstance(t2d, DataInterface): - err = ( - "transient2d_helper only helps instances that support " - "DataInterface" - ) + err = "transient2d_helper only helps instances that support DataInterface" raise AssertionError(err) min_valid = kwargs.get("min_valid", -1.0e9) @@ -1059,14 +1017,10 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): if "modelgrid" in kwargs: modelgrid = kwargs.pop("modelgrid") - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, t2d.model, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": array_dict = {} for kper in range(t2d.model.modeltime.nper): u2d = t2d[kper] @@ -1105,9 +1059,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): units = "unitless" if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format( - f.grid_units, f.time_units - ) + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units) try: precision_str = NC_PRECISION_TYPE[t2d.dtype] except: @@ -1208,14 +1160,10 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): if "modelgrid" in kwargs: modelgrid = kwargs.pop("modelgrid") - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, u3d.model, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": array_dict = {} array_shape = u3d.array.shape @@ -1277,9 +1225,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): array[np.isnan(array)] = f.fillvalue units = "unitless" if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format( - f.grid_units, f.time_units - ) + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units) precision_str = NC_PRECISION_TYPE[u3d.dtype] if var_name in NC_LONG_NAMES: attribs = {"long_name": NC_LONG_NAMES[var_name]} @@ -1338,9 +1284,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): raise NotImplementedError(f"unrecognized export argument:{f}") -def 
array2d_export( - f: Union[str, os.PathLike], u2d, fmt=None, verbose=False, **kwargs -): +def array2d_export(f: Union[str, os.PathLike], u2d, fmt=None, verbose=False, **kwargs): """ export helper for Util2d instances @@ -1373,14 +1317,10 @@ def array2d_export( if "modelgrid" in kwargs: modelgrid = kwargs.pop("modelgrid") - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".nc": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc": f = NetCdf(f, u2d.model, **kwargs) - if (isinstance(f, str) or isinstance(f, Path)) and Path( - f - ).suffix.lower() == ".shp": + if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".shp": name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True) shapefile_utils.write_grid_shapefile( f, modelgrid, {name: u2d.array}, verbose=verbose @@ -1428,9 +1368,7 @@ def array2d_export( units = "unitless" if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format( - f.grid_units, f.time_units - ) + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units) precision_str = NC_PRECISION_TYPE[u2d.dtype] if var_name in NC_LONG_NAMES: attribs = {"long_name": NC_LONG_NAMES[var_name]} @@ -1543,9 +1481,7 @@ def export_array( filename = str(filename) if filename.lower().endswith(".asc"): if ( - len(np.unique(modelgrid.delr)) - != len(np.unique(modelgrid.delc)) - != 1 + len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 or modelgrid.delr[0] != modelgrid.delc[0] ): raise ValueError("Arc ascii arrays require a uniform grid.") @@ -1569,9 +1505,7 @@ def export_array( cellsize = np.max((dx, dy)) xoffset, yoffset = xmin, ymin - filename = ( - ".".join(filename.split(".")[:-1]) + ".asc" - ) # enforce .asc ending + filename = ".".join(filename.split(".")[:-1]) + ".asc" # enforce .asc ending nrow, ncol = a.shape a[np.isnan(a)] = nodata txt = f"ncols {ncol}\n" @@ -1590,9 +1524,7 @@ def export_array( elif filename.lower().endswith(".tif"): if ( - len(np.unique(modelgrid.delr)) - != len(np.unique(modelgrid.delc)) - != 1 + len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 or modelgrid.delr[0] != modelgrid.delc[0] ): raise ValueError("GeoTIFF export require a uniform grid.") @@ -1760,9 +1692,7 @@ def export_contours( recarray2shp(ra, geoms, filename, **kwargs) -def export_contourf( - filename, contours, fieldname="level", verbose=False, **kwargs -): +def export_contourf(filename, contours, fieldname="level", verbose=False, **kwargs): """ Write matplotlib filled contours to shapefile. 
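Aside on the flopy/export/utils.py hunks above: every export helper in that module collapses the same two-step dispatch, inspecting the suffix of f and either wrapping the path in a NetCdf instance (".nc") or handing off to shapefile_utils (".shp"). A minimal sketch of that idiom follows, assuming only the standard library; dispatch_export and its returned labels are illustrative stand-ins, not FloPy API:

    # Sketch of the suffix-based dispatch used by the export helpers above.
    # `dispatch_export` is hypothetical; the real helpers construct
    # NetCdf(f, model, **kwargs) or call shapefile_utils writers instead.
    import os
    from pathlib import Path
    from typing import Union

    def dispatch_export(f: Union[str, os.PathLike]) -> str:
        suffix = Path(f).suffix.lower()
        if suffix == ".nc":
            return "netcdf"
        if suffix == ".shp":
            return "shapefile"
        raise NotImplementedError(f"unrecognized export argument:{f}")

    assert dispatch_export("top.nc") == "netcdf"
    assert dispatch_export("GRID.SHP") == "shapefile"
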
diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py index e25c94695e..579a2990ca 100644 --- a/flopy/export/vtk.py +++ b/flopy/export/vtk.py @@ -141,9 +141,7 @@ def __init__( vtk = import_optional_dependency("vtk") if model is None and modelgrid is None: - raise AssertionError( - "A model or modelgrid must be provided to use Vtk" - ) + raise AssertionError("A model or modelgrid must be provided to use Vtk") elif model is not None: self.modelgrid = model.modelgrid @@ -421,10 +419,7 @@ def _build_grid_geometry(self): adji = (adjk * self.ncpl) + i zv = self.top[adji] * self.vertical_exageration else: - zv = ( - self.botm[adjk - 1][i] - * self.vertical_exageration - ) + zv = self.botm[adjk - 1][i] * self.vertical_exageration points.append([xv, yv, zv]) v1 += 1 @@ -436,13 +431,9 @@ def _build_grid_geometry(self): for v in range(v0, v1): if v != v1 - 1: - cell_faces.append( - [v + 1, v, v + self.nvpl, v + self.nvpl + 1] - ) + cell_faces.append([v + 1, v, v + self.nvpl, v + self.nvpl + 1]) else: - cell_faces.append( - [v0, v, v + self.nvpl, v0 + self.nvpl] - ) + cell_faces.append([v0, v, v + self.nvpl, v0 + self.nvpl]) v0 = v1 faces.append(cell_faces) @@ -574,9 +565,7 @@ def _build_hfbs(self, pkg): pts = [] for v in v1: - ix = np.asarray( - (v2.T[0] == v[0]) & (v2.T[1] == v[1]) - ).nonzero() + ix = np.asarray((v2.T[0] == v[0]) & (v2.T[1] == v[1])).nonzero() if len(ix[0]) > 0 and len(pts) < 2: pts.append(v2[ix[0][0]]) @@ -614,9 +603,7 @@ def _build_hfbs(self, pkg): polygon.GetPointIds().SetNumberOfIds(4) for ix, iv in enumerate(face): polygon.GetPointIds().SetId(ix, iv) - polydata.InsertNextCell( - polygon.GetCellType(), polygon.GetPointIds() - ) + polydata.InsertNextCell(polygon.GetCellType(), polygon.GetPointIds()) # and then set the hydchr data vtk_arr = numpy_support.numpy_to_vtk( @@ -820,9 +807,7 @@ def add_transient_array(self, d, name=None, masked_values=None): transient[kper] = array else: if name is None: - raise ValueError( - "name must be specified when providing numpy arrays" - ) + raise ValueError("name must be specified when providing numpy arrays") for kper, trarray in d.items(): if trarray.size != self.nnodes: array = np.zeros(self.nnodes) * np.nan @@ -911,9 +896,7 @@ def add_vector(self, vector, name, masked_values=None): tv[ix, : self.ncpl] = q vector = tv else: - raise AssertionError( - "Size of vector must be 3 * nnodes or 3 * ncpl" - ) + raise AssertionError("Size of vector must be 3 * nnodes or 3 * ncpl") else: vector = np.reshape(vector, (3, self.nnodes)) @@ -967,10 +950,7 @@ def add_transient_vector(self, d, name, masked_values=None): if not isinstance(value, np.ndarray): value = np.array(value) - if ( - value.size != 3 * self.ncpl - or value.size != 3 * self.nnodes - ): + if value.size != 3 * self.ncpl or value.size != 3 * self.nnodes: raise AssertionError( "Size of vector must be 3 * nnodes or 3 * ncpl" ) @@ -1106,11 +1086,7 @@ def add_pathline_points(self, pathlines, timeseries=False): if len(pathlines) == 0: return pathlines = [ - ( - pl.to_records(index=False) - if isinstance(pl, pd.DataFrame) - else pl - ) + (pl.to_records(index=False) if isinstance(pl, pd.DataFrame) else pl) for pl in pathlines ] fields = pathlines[0].dtype.names @@ -1135,9 +1111,7 @@ def add_pathline_points(self, pathlines, timeseries=False): } if all(k in pathlines.dtype.names for k in mpx_fields): pids = np.unique(pathlines.particleid) - pathlines = [ - pathlines[pathlines.particleid == pid] for pid in pids - ] + pathlines = [pathlines[pathlines.particleid == pid] for pid in pids] elif all(k in 
pathlines.dtype.names for k in prt_fields): pls = [] for imdl in np.unique(pathlines.imdl): @@ -1148,9 +1122,7 @@ def add_pathline_points(self, pathlines, timeseries=False): & (pathlines.iprp == iprp) & (pathlines.irpt == irpt) ] - pls.extend( - [pl[pl.trelease == t] for t in np.unique(pl.t)] - ) + pls.extend([pl[pl.trelease == t] for t in np.unique(pl.t)]) pathlines = pls else: raise ValueError("Unrecognized pathline dtype") @@ -1240,9 +1212,7 @@ def add_heads(self, hds, kstpkper=None, masked_values=None): self.add_transient_array(d, name=text, masked_values=masked_values) self.__transient_output_data = True - def add_cell_budget( - self, cbc, text=None, kstpkper=None, masked_values=None - ): + def add_cell_budget(self, cbc, text=None, kstpkper=None, masked_values=None): """ Method to add cell budget data to vtk @@ -1268,9 +1238,7 @@ def add_cell_budget( ) records = cbc.get_unique_record_names(decode=True) - imeth_dict = { - record: imeth for (record, imeth) in zip(records, cbc.imethlist) - } + imeth_dict = {record: imeth for (record, imeth) in zip(records, cbc.imethlist)} if text is None: keylist = records else: @@ -1304,8 +1272,7 @@ def add_cell_budget( if array.size < self.nnodes: if array.size < self.ncpl: raise AssertionError( - "Array size must be equal to " - "either ncpl or nnodes" + "Array size must be equal to either ncpl or nnodes" ) array = np.zeros(self.nnodes) * np.nan @@ -1366,9 +1333,7 @@ def _set_particle_track_data(self, points, lines=None, arrays=None): for ii in range(0, npts): poly.GetPointIds().SetId(ii, i) i += 1 - self.vtk_pathlines.InsertNextCell( - poly.GetCellType(), poly.GetPointIds() - ) + self.vtk_pathlines.InsertNextCell(poly.GetCellType(), poly.GetPointIds()) # create a vtkVertex for each point # necessary if arrays (time & particle ID) live on points? @@ -1471,9 +1436,7 @@ def write(self, f: Union[str, os.PathLike], kper=None): else: w.SetInputData(grid) - if ( - self.__transient_data or self.__transient_vector - ) and ix == 0: + if (self.__transient_data or self.__transient_vector) and ix == 0: if self.__transient_data: cnt = 0 for per, d in self.__transient_data.items(): diff --git a/flopy/mbase.py b/flopy/mbase.py index dbd65f5f33..f0bfbdaac9 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -44,9 +44,7 @@ iprn = -1 -def resolve_exe( - exe_name: Union[str, os.PathLike], forgive: bool = False -) -> str: +def resolve_exe(exe_name: Union[str, os.PathLike], forgive: bool = False) -> str: """ Resolves the absolute path of the executable, raising FileNotFoundError if the executable cannot be found (set forgive to True to return None and warn instead of raising an error). 
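For context on the hunk above: resolve_exe is the module-level guard flopy calls before spawning a MODFLOW binary, and with forgive=True a missing executable yields a warning and None rather than FileNotFoundError. A usage sketch, assuming FloPy is installed (whether an "mf6" binary is on PATH is environment-dependent):

    # resolve_exe returns the absolute path of a found executable;
    # forgive=True downgrades a miss from an exception to a warning + None.
    from flopy.mbase import resolve_exe

    exe = resolve_exe("mf6", forgive=True)
    if exe is None:
        print("mf6 not found on PATH; skipping model run")
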
@@ -140,9 +138,7 @@ def add_file(self, fname, unit, binflag=False, output=False, package=None): ipop.append(idx) self.file_data.append( - FileDataEntry( - fname, unit, binflag=binflag, output=output, package=package - ) + FileDataEntry(fname, unit, binflag=binflag, output=output, package=package) ) return @@ -345,9 +341,9 @@ def _check(self, chk, level=1): if ( r is not None and r.summary_array is not None ): # currently SFR doesn't have one - chk.summary_array = np.append( - chk.summary_array, r.summary_array - ).view(np.recarray) + chk.summary_array = np.append(chk.summary_array, r.summary_array).view( + np.recarray + ) chk.passed += [ f"{r.package.name[0]} package: {psd}" for psd in r.passed ] @@ -403,9 +399,7 @@ def __init__( self._packagelist = [] self.heading = "" self.exe_name = ( - "mf2005" - if exe_name is None - else resolve_exe(exe_name, forgive=True) + "mf2005" if exe_name is None else resolve_exe(exe_name, forgive=True) ) self._verbose = verbose self.external_path = None @@ -669,9 +663,7 @@ def remove_package(self, pname): if iu in self.package_units: self.package_units.remove(iu) return - raise StopIteration( - "Package name " + pname + " not found in Package list" - ) + raise StopIteration("Package name " + pname + " not found in Package list") def __getattr__(self, item): """ @@ -729,11 +721,7 @@ def __getattr__(self, item): return None # to avoid infinite recursion - if ( - item == "_packagelist" - or item == "packagelist" - or item == "mfnam_packages" - ): + if item == "_packagelist" or item == "packagelist" or item == "mfnam_packages": raise AttributeError(item) pckg = self.get_package(item) if pckg is not None or item in self.mfnam_packages: @@ -890,9 +878,7 @@ def add_output( if self.verbose: self._output_msg(-1, add=True) - def remove_output( - self, fname: Optional[Union[str, os.PathLike]] = None, unit=None - ): + def remove_output(self, fname: Optional[Union[str, os.PathLike]] = None, unit=None): """ Remove an output file from the model by specifying either the file name or the unit number. @@ -927,9 +913,7 @@ def remove_output( msg = "either fname or unit must be passed to remove_output()" raise TypeError(msg) - def get_output( - self, fname: Optional[Union[str, os.PathLike]] = None, unit=None - ): + def get_output(self, fname: Optional[Union[str, os.PathLike]] = None, unit=None): """ Get an output file from the model by specifying either the file name or the unit number. 
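remove_output and get_output, reformatted above, both key into the parallel lists output_fnames, output_units, and output_binflag that BaseModel keeps in sync, which is why each accepts either fname or unit and raises TypeError when given neither. A sketch of the calling convention, with the model and file names illustrative only:

    # Register a binary head file on unit 51, then drop it again.
    # Either the file name or the unit number identifies the entry.
    import flopy

    ml = flopy.modflow.Modflow(modelname="demo")
    ml.add_output("demo.hds", 51, binflag=True)
    assert ml.get_output(unit=51) == "demo.hds"
    ml.remove_output(fname="demo.hds")  # equivalently: ml.remove_output(unit=51)
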
@@ -989,8 +973,7 @@ def set_output_attribute( break else: raise TypeError( - "either fname or unit must be passed " - "to set_output_attribute()" + "either fname or unit must be passed to set_output_attribute()" ) if attr is not None: if idx is not None: @@ -1033,8 +1016,7 @@ def get_output_attribute( break else: raise TypeError( - "either fname or unit must be passed " - "to set_output_attribute()" + "either fname or unit must be passed to get_output_attribute()" ) v = None if attr is not None: @@ -1077,7 +1059,9 @@ def add_external( self.external_output.pop(idx) if unit in self.external_units: if self.verbose: - msg = f"BaseModel.add_external() warning: replacing existing unit {unit}" + msg = ( + f"BaseModel.add_external() warning: replacing existing unit {unit}" + ) print(msg) idx = self.external_units.index(unit) self.external_fnames.pop(idx) @@ -1300,9 +1284,7 @@ def change_model_ws( old_pth = self._model_ws self._model_ws = new_pth if self.verbose: - print( - f"\nchanging model workspace...\n {flopy_io.relpath_safe(new_pth)}" - ) + print(f"\nchanging model workspace...\n {flopy_io.relpath_safe(new_pth)}") # reset the paths for each package for pp in self.packagelist: pp.fn_path = os.path.join(self.model_ws, pp.file_name[0]) @@ -1311,9 +1293,7 @@ def change_model_ws( if ( hasattr(self, "external_path") and self.external_path is not None - and not os.path.exists( - os.path.join(self._model_ws, self.external_path) - ) + and not os.path.exists(os.path.join(self._model_ws, self.external_path)) ): pth = os.path.join(self._model_ws, self.external_path) os.makedirs(pth) @@ -1325,9 +1305,7 @@ def _reset_external(self, pth, old_pth): new_ext_fnames = [] - for ext_file, output in zip( - self.external_fnames, self.external_output - ): + for ext_file, output in zip(self.external_fnames, self.external_output): # this is a wicked mess if output: new_ext_file = ext_file @@ -1369,23 +1347,17 @@ def __setattr__(self, key, value): elif key == "model_ws": self.change_model_ws(value) elif key == "tr": - assert isinstance( - value, discretization.reference.TemporalReference - ) + assert isinstance(value, discretization.reference.TemporalReference) if self.dis is not None: self.dis.tr = value else: - raise Exception( - "cannot set TemporalReference - ModflowDis not found" - ) + raise Exception("cannot set TemporalReference - ModflowDis not found") elif key == "start_datetime": if self.dis is not None: self.dis.start_datetime = value self.tr.start_datetime = value else: - raise Exception( - "cannot set start_datetime - ModflowDis not found" - ) + raise Exception("cannot set start_datetime - ModflowDis not found") else: super().__setattr__(key, value) @@ -1581,9 +1553,7 @@ def check( if p.unit_number[i] in package_units.values(): duplicate_units[p.name[i]] = p.unit_number[i] otherpackage = [ - k - for k, v in package_units.items() - if v == p.unit_number[i] + k for k, v in package_units.items() if v == p.unit_number[i] ][0] duplicate_units[otherpackage] = p.unit_number[i] if len(duplicate_units) > 0: @@ -1644,9 +1614,7 @@ def plot(self, SelPackList=None, **kwargs): """ from .plot import PlotUtilities - axes = PlotUtilities._plot_model_helper( - self, SelPackList=SelPackList, **kwargs - ) + axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList, **kwargs) return axes def to_shapefile(self, *args, **kwargs): @@ -1740,9 +1708,7 @@ def run_model( ) # make sure namefile exists - if namefile is not None and not os.path.isfile( - os.path.join(model_ws, namefile) - ): + if 
namefile is not None and not os.path.isfile(os.path.join(model_ws, namefile)): raise FileNotFoundError( f"The namefile for this model does not exist: {namefile}" ) diff --git a/flopy/mfusg/mfusg.py b/flopy/mfusg/mfusg.py index 6f84d2b40f..5dc926483f 100644 --- a/flopy/mfusg/mfusg.py +++ b/flopy/mfusg/mfusg.py @@ -196,9 +196,7 @@ def load( # similar to modflow command: if file does not exist , try file.nam namefile_path = os.path.join(model_ws, f) - if not os.path.isfile(namefile_path) and os.path.isfile( - f"{namefile_path}.nam" - ): + if not os.path.isfile(namefile_path) and os.path.isfile(f"{namefile_path}.nam"): namefile_path += ".nam" if not os.path.isfile(namefile_path): raise OSError(f"cannot find name file: {namefile_path}") @@ -209,9 +207,7 @@ def load( if verbose: print(f"\nCreating new model with name: {modelname}\n{50 * '-'}\n") - attribs = mfreadnam.attribs_from_namfile_header( - os.path.join(model_ws, f) - ) + attribs = mfreadnam.attribs_from_namfile_header(os.path.join(model_ws, f)) model = cls( modelname, @@ -270,9 +266,7 @@ def load( cls._set_output_external(model, ext_unit_dict) # send messages re: success/failure of loading - cls._send_load_messages( - model, files_successfully_loaded, files_not_loaded - ) + cls._send_load_messages(model, files_successfully_loaded, files_not_loaded) if check: model.check(f=f"{model.name}.chk", verbose=model.verbose, level=0) @@ -281,9 +275,7 @@ def load( return model @classmethod - def _load_packages( - cls, model, ext_unit_dict, ext_pkg_d, load_only, forgive - ): + def _load_packages(cls, model, ext_unit_dict, ext_pkg_d, load_only, forgive): """ Method to load packages into the MODFLOW-USG Model Class. For internal class use - should not be called by the user. @@ -435,9 +427,7 @@ def _prepare_external_files(model, key, item): if key not in model.external_units: model.external_fnames.append(item.filename) model.external_units.append(key) - model.external_binflag.append( - "binary" in item.filetype.lower() - ) + model.external_binflag.append("binary" in item.filetype.lower()) model.external_output.append(False) @staticmethod @@ -504,9 +494,7 @@ def _set_output_external(model, ext_unit_dict): ) @staticmethod - def _send_load_messages( - model, files_successfully_loaded, files_not_loaded - ): + def _send_load_messages(model, files_successfully_loaded, files_not_loaded): """Send messages re: success/failure of loading.""" # write message indicating packages that were successfully loaded if model.verbose: @@ -552,7 +540,5 @@ def fmt_string(array): ) raise TypeError(msg) else: - raise TypeError( - "mfusg.fmt_string error: unknown vtype in" f"field: {field}" - ) + raise TypeError(f"mfusg.fmt_string error: unknown vtype in field: {field}") return "".join(fmts) diff --git a/flopy/mfusg/mfusgbcf.py b/flopy/mfusg/mfusgbcf.py index 290b8f37b9..e60742bdb8 100644 --- a/flopy/mfusg/mfusgbcf.py +++ b/flopy/mfusg/mfusgbcf.py @@ -261,9 +261,7 @@ def write_file(self, f=None): # LAYCON array for layer in range(nlay): if self.intercellt[layer] > 0: - f_obj.write( - f"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} " - ) + f_obj.write(f"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} ") else: f_obj.write(f"0{self.laycon[layer]:1d} ") f_obj.write("\n") @@ -384,12 +382,8 @@ def load(cls, f, model, ext_unit_dict=None): int(text_list[5]), ) - ikvflag = type_from_iterable( - text_list, index=6, _type=int, default_val=0 - ) - ikcflag = type_from_iterable( - text_list, index=7, _type=int, default_val=0 - ) + ikvflag = type_from_iterable(text_list, index=6, 
_type=int, default_val=0) + ikcflag = type_from_iterable(text_list, index=7, _type=int, default_val=0) # LAYCON array laycon, intercellt = cls._load_laycon(f_obj, model) @@ -397,9 +391,7 @@ def load(cls, f, model, ext_unit_dict=None): # TRPY array if model.verbose: print(" loading TRPY...") - trpy = Util2d.load( - f_obj, model, (nlay,), np.float32, "trpy", ext_unit_dict - ) + trpy = Util2d.load(f_obj, model, (nlay,), np.float32, "trpy", ext_unit_dict) # property data for each layer based on options transient = not dis.steady.all() @@ -430,9 +422,7 @@ def load(cls, f, model, ext_unit_dict=None): if (not model.structured) and abs(ikcflag == 1): if model.verbose: print(" loading ksat (njag)...") - ksat = Util2d.load( - f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict - ) + ksat = Util2d.load(f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict) f_obj.close() diff --git a/flopy/mfusg/mfusgcln.py b/flopy/mfusg/mfusgcln.py index 04b3723817..e88b8f312c 100644 --- a/flopy/mfusg/mfusgcln.py +++ b/flopy/mfusg/mfusgcln.py @@ -273,9 +273,7 @@ def __init__( raise Exception("mfcln: CLN-GW connections not provided") if len(cln_gwc) != nclngwc: - raise Exception( - "mfcln: Number of CLN-GW connections not equal to nclngwc" - ) + raise Exception("mfcln: Number of CLN-GW connections not equal to nclngwc") structured = self.parent.structured @@ -334,15 +332,12 @@ def _define_cln_networks(self, model): raise Exception("mfcln: CLN network not defined") if self.ncln < 0: - raise Exception( - "mfcln: negative number of CLN segments in CLN package" - ) + raise Exception("mfcln: negative number of CLN segments in CLN package") if self.ncln > 0: # Linear CLN segments if self.nndcln is None: raise Exception( - "mfcln: number of nodes for each CLN segment must be " - "provided" + "mfcln: number of nodes for each CLN segment must be provided" ) self.nndcln = Util2d( model, @@ -391,9 +386,7 @@ def _define_cln_networks(self, model): if self.ja_cln is None: raise Exception("mfcln: ja_cln must be provided") if abs(self.ja_cln[0]) != 1: - raise Exception( - "mfcln: first ja_cln entry (node 1) is not 1 or -1." 
- ) + raise Exception("mfcln: first ja_cln entry (node 1) is not 1 or -1.") self.ja_cln = Util2d( model, (self.nja_cln,), @@ -407,14 +400,10 @@ def _define_cln_geometries(self): """Initialises CLN geometry types.""" # Circular conduit geometry types if self.nconduityp <= 0 or self.cln_circ is None: - raise Exception( - "mfcln: Circular conduit properties must be provided" - ) + raise Exception("mfcln: Circular conduit properties must be provided") if len(self.cln_circ) != self.nconduityp: - raise Exception( - "mfcln: Number of circular properties not equal nconduityp" - ) + raise Exception("mfcln: Number of circular properties not equal nconduityp") self.cln_circ = self._make_recarray( self.cln_circ, dtype=MfUsgClnDtypes.get_clncirc_dtype(self.bhe) @@ -472,13 +461,9 @@ def write_file(self, f=None, check=False): f_cln.write(self.iac_cln.get_file_entry()) f_cln.write(self.ja_cln.get_file_entry()) - np.savetxt( - f_cln, self.node_prop, fmt=fmt_string(self.node_prop), delimiter="" - ) + np.savetxt(f_cln, self.node_prop, fmt=fmt_string(self.node_prop), delimiter="") - np.savetxt( - f_cln, self.cln_gwc, fmt=fmt_string(self.cln_gwc), delimiter="" - ) + np.savetxt(f_cln, self.cln_gwc, fmt=fmt_string(self.cln_gwc), delimiter="") if self.nconduityp > 0: np.savetxt( @@ -624,15 +609,11 @@ def load(cls, f, model, pak_type="cln", ext_unit_dict=None, **kwargs): if model.verbose: print(" Reading ibound...") - ibound = Util2d.load( - f, model, (nclnnds, 1), np.int32, "ibound", ext_unit_dict - ) + ibound = Util2d.load(f, model, (nclnnds, 1), np.int32, "ibound", ext_unit_dict) if model.verbose: print(" Reading strt...") - strt = Util2d.load( - f, model, (nclnnds, 1), np.float32, "strt", ext_unit_dict - ) + strt = Util2d.load(f, model, (nclnnds, 1), np.float32, "strt", ext_unit_dict) if hasattr(f, "read"): f.close() diff --git a/flopy/mfusg/mfusgdisu.py b/flopy/mfusg/mfusgdisu.py index 2d3a4fb979..0c284387d2 100644 --- a/flopy/mfusg/mfusgdisu.py +++ b/flopy/mfusg/mfusgdisu.py @@ -267,9 +267,7 @@ def __init__( self.idsymrd = idsymrd # LAYCBD - self.laycbd = Util2d( - model, (self.nlay,), np.int32, laycbd, name="laycbd" - ) + self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, name="laycbd") self.laycbd[-1] = 0 # bottom layer must be zero # NODELAY @@ -420,13 +418,9 @@ def __init__( ) # Stress period information - self.perlen = Util2d( - model, (self.nper,), np.float32, perlen, name="perlen" - ) + self.perlen = Util2d(model, (self.nper,), np.float32, perlen, name="perlen") self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name="nstp") - self.tsmult = Util2d( - model, (self.nper,), np.float32, tsmult, name="tsmult" - ) + self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, name="tsmult") self.steady = Util2d(model, (self.nper,), bool, steady, name="steady") self.itmuni_dict = { @@ -449,9 +443,7 @@ def __init__( lenuni=self.lenuni, ) - self.tr = TemporalReference( - itmuni=self.itmuni, start_datetime=start_datetime - ) + self.tr = TemporalReference(itmuni=self.itmuni, start_datetime=start_datetime) self.start_datetime = start_datetime @@ -613,9 +605,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): # dataset 3 -- nodelay if model.verbose: print(" loading NODELAY...") - nodelay = Util2d.load( - f, model, (nlay,), np.int32, "nodelay", ext_unit_dict - ) + nodelay = Util2d.load(f, model, (nlay,), np.int32, "nodelay", ext_unit_dict) if model.verbose: print(f" NODELAY {nodelay}") @@ -624,9 +614,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): print(" loading TOP...") top 
= [0] * nlay for k in range(nlay): - tpk = Util2d.load( - f, model, (nodelay[k],), np.float32, "top", ext_unit_dict - ) + tpk = Util2d.load(f, model, (nodelay[k],), np.float32, "top", ext_unit_dict) top[k] = tpk if model.verbose: for k, tpk in enumerate(top): @@ -637,9 +625,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): print(" loading BOT...") bot = [0] * nlay for k in range(nlay): - btk = Util2d.load( - f, model, (nodelay[k],), np.float32, "btk", ext_unit_dict - ) + btk = Util2d.load(f, model, (nodelay[k],), np.float32, "btk", ext_unit_dict) bot[k] = btk if model.verbose: for k, btk in enumerate(bot): @@ -682,9 +668,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if ivsd == 1: if model.verbose: print(" loading IVC...") - ivc = Util2d.load( - f, model, (njag,), np.int32, "ivc", ext_unit_dict - ) + ivc = Util2d.load(f, model, (njag,), np.int32, "ivc", ext_unit_dict) if model.verbose: print(f" IVC {ivc}") @@ -693,9 +677,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if idsymrd == 1: if model.verbose: print(" loading CL1...") - cl1 = Util2d.load( - f, model, (njags,), np.float32, "cl1", ext_unit_dict - ) + cl1 = Util2d.load(f, model, (njags,), np.float32, "cl1", ext_unit_dict) if model.verbose: print(f" CL1 {cl1}") @@ -704,9 +686,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if idsymrd == 1: if model.verbose: print(" loading CL2...") - cl2 = Util2d.load( - f, model, (njags,), np.float32, "cl2", ext_unit_dict - ) + cl2 = Util2d.load(f, model, (njags,), np.float32, "cl2", ext_unit_dict) if model.verbose: print(f" CL2 {cl2}") @@ -715,9 +695,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if idsymrd == 0: if model.verbose: print(" loading CL12...") - cl12 = Util2d.load( - f, model, (njag,), np.float32, "cl12", ext_unit_dict - ) + cl12 = Util2d.load(f, model, (njag,), np.float32, "cl12", ext_unit_dict) if model.verbose: print(f" CL12 {cl12}") @@ -879,9 +857,7 @@ def write_file(self): # Item 13: NPER, NSTP, TSMULT, Ss/tr for t in range(self.nper): - f_dis.write( - f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} " - ) + f_dis.write(f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} ") if self.steady[t]: f_dis.write(" SS\n") else: diff --git a/flopy/mfusg/mfusggnc.py b/flopy/mfusg/mfusggnc.py index 2d12a47e3f..743ec7add5 100644 --- a/flopy/mfusg/mfusggnc.py +++ b/flopy/mfusg/mfusggnc.py @@ -128,9 +128,7 @@ def __init__( if 0 < numalphaj < 6: self.numalphaj = numalphaj else: - raise Exception( - "mfgnc: incorrect number of adjacent contributing nodes" - ) + raise Exception("mfgnc: incorrect number of adjacent contributing nodes") self.i2kn = i2kn self.isymgncn = isymgncn @@ -140,9 +138,7 @@ def __init__( raise Exception("mfgnc: GNC data must be provided") if len(gncdata) != self.numgnc: - raise Exception( - "mfgnc: Length of GNC data must equal number of GNC nodes" - ) + raise Exception("mfgnc: Length of GNC data must equal number of GNC nodes") self.dtype = MfUsgGnc.get_default_dtype(self.numalphaj, self.iflalphan) diff --git a/flopy/mfusg/mfusglpf.py b/flopy/mfusg/mfusglpf.py index a3ca1bab00..be91d48129 100644 --- a/flopy/mfusg/mfusglpf.py +++ b/flopy/mfusg/mfusglpf.py @@ -379,9 +379,7 @@ def write_file(self, check=True, f=None): # Item 7: WETFCT, IWETIT, IHDWET iwetdry = self.laywet.sum() if iwetdry > 0: - f_obj.write( - f"{self.wetfct:10f}{self.iwetit:10d}{self.ihdwet:10d}\n" - ) + f_obj.write(f"{self.wetfct:10f}{self.iwetit:10d}{self.ihdwet:10d}\n") transient = not dis.steady.all() structured = 
self.parent.structured @@ -522,9 +520,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if abs(ikcflag) == 1: if model.verbose: print(" loading ksat...") - ksat = Util2d.load( - f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict - ) + ksat = Util2d.load(f_obj, model, (njag,), np.float32, "ksat", ext_unit_dict) f_obj.close() @@ -593,9 +589,7 @@ def _load_item1(line, model): ] constantcv = "CONSTANTCV" in [item.upper() for item in text_list] thickstrt = "THICKSTRT" in [item.upper() for item in text_list] - nocvcorrection = "NOCVCORRECTION" in [ - item.upper() for item in text_list - ] + nocvcorrection = "NOCVCORRECTION" in [item.upper() for item in text_list] novfc = "NOVFC" in [item.upper() for item in text_list] return ( @@ -854,9 +848,7 @@ def _load_layer_properties( return hk, hani, vka, ss, sy, vkcb, wetdry @staticmethod - def _load_storage( - f_obj, model, layer_vars, ext_unit_dict, par_types_parm_dict - ): + def _load_storage(f_obj, model, layer_vars, ext_unit_dict, par_types_parm_dict): """ Loads ss, sy file entries. diff --git a/flopy/mfusg/mfusgsms.py b/flopy/mfusg/mfusgsms.py index f356292205..afd00d11c2 100644 --- a/flopy/mfusg/mfusgsms.py +++ b/flopy/mfusg/mfusgsms.py @@ -463,10 +463,7 @@ def load(cls, f, model, ext_unit_dict=None): # Record 1b -- line will have already been read if model.verbose: - print( - " loading HCLOSE HICLOSE MXITER ITER1 " - "IPRSMS NONLINMETH LINMETH..." - ) + print(" loading HCLOSE HICLOSE MXITER ITER1 IPRSMS NONLINMETH LINMETH...") ll = line_parse(line) hclose = float(ll.pop(0)) hiclose = float(ll.pop(0)) diff --git a/flopy/mfusg/mfusgwel.py b/flopy/mfusg/mfusgwel.py index 493c7a19c7..9a859734b4 100644 --- a/flopy/mfusg/mfusgwel.py +++ b/flopy/mfusg/mfusgwel.py @@ -229,9 +229,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) # determine if any aux variables in dtype options = self._check_for_aux(options) @@ -239,9 +237,7 @@ def __init__( self.options = options # initialize MfList - self.stress_period_data = MfList( - self, stress_period_data, binary=binary - ) + self.stress_period_data = MfList(self, stress_period_data, binary=binary) if add_package: self.parent.add_package(self) @@ -297,9 +293,7 @@ def write_file(self, f=None): f_wel.write(f"{self.heading}\n") - mxact = ( - self.stress_period_data.mxact + self.cln_stress_period_data.mxact - ) + mxact = self.stress_period_data.mxact + self.cln_stress_period_data.mxact line = f" {mxact:9d} {self.ipakcb:9d} " if self.options is None: diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index b1c2a65aed..f4c3836e85 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -228,9 +228,8 @@ def __init__( def __repr__(self): nrow, ncol, nlay, nper = self.get_nrow_ncol_nlay_nper() # structured case - s = ( - "MODFLOW {} layer(s) {} row(s) {} column(s) " - "{} stress period(s)".format(nlay, nrow, ncol, nper) + s = "MODFLOW {} layer(s) {} row(s) {} column(s) {} stress period(s)".format( + nlay, nrow, ncol, nper ) return s @@ -264,11 +263,7 @@ def modelgrid(self): else: ibound = None # take the first non-None entry - crs = ( - self._modelgrid.crs - or self._modelgrid.proj4 - or self._modelgrid.epsg - ) + crs = self._modelgrid.crs or self._modelgrid.proj4 or self._modelgrid.epsg common_kwargs = { "crs": crs, "xoff": self._modelgrid.xoffset, @@ -292,10 +287,7 @@ def modelgrid(self): ja=self.disu.ja.array, **common_kwargs, ) 
- print( - "WARNING: Model grid functionality limited for unstructured " - "grid." - ) + print("WARNING: Model grid functionality limited for unstructured grid.") else: # build structured grid self._modelgrid = StructuredGrid( @@ -483,9 +475,7 @@ def write_name_file(self): f_nam.write(f"DATA {u:5d} {f}\n") # write the output files - for u, f, b in zip( - self.output_units, self.output_fnames, self.output_binflag - ): + for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag): if u == 0: continue if b: @@ -846,15 +836,11 @@ def load( ) files_successfully_loaded.append(item.filename) if ml.verbose: - print( - f" {item.filetype:4s} package load...success" - ) + print(f" {item.filetype:4s} package load...success") except Exception as e: ml.load_fail = True if ml.verbose: - print( - f" {item.filetype:4s} package load...failed" - ) + print(f" {item.filetype:4s} package load...failed") print(f" {e!s}") files_not_loaded.append(item.filename) else: @@ -873,9 +859,7 @@ def load( ) files_successfully_loaded.append(item.filename) if ml.verbose: - print( - f" {item.filetype:4s} package load...success" - ) + print(f" {item.filetype:4s} package load...success") else: if ml.verbose: print(f" {item.filetype:4s} package load...skipped") @@ -893,9 +877,7 @@ def load( if key not in ml.external_units: ml.external_fnames.append(item.filename) ml.external_units.append(key) - ml.external_binflag.append( - "binary" in item.filetype.lower() - ) + ml.external_binflag.append("binary" in item.filetype.lower()) ml.external_output.append(False) else: raise KeyError(f"unhandled case: {key}, {item}") diff --git a/flopy/modflow/mfaddoutsidefile.py b/flopy/modflow/mfaddoutsidefile.py index b9935cd24f..10cb48de4f 100644 --- a/flopy/modflow/mfaddoutsidefile.py +++ b/flopy/modflow/mfaddoutsidefile.py @@ -8,9 +8,7 @@ class mfaddoutsidefile(Package): def __init__(self, model, name, extension, unitnumber): # call base package constructor - super().__init__( - model, extension, name, unitnumber, allowDuplicates=True - ) + super().__init__(model, extension, name, unitnumber, allowDuplicates=True) self.parent.add_package(self) def __repr__(self): diff --git a/flopy/modflow/mfag.py b/flopy/modflow/mfag.py index 9c0e5125f8..2b2597335f 100644 --- a/flopy/modflow/mfag.py +++ b/flopy/modflow/mfag.py @@ -111,9 +111,7 @@ class ModflowAg(Package): OptionBlock.dtype: np.bool_, OptionBlock.nested: True, OptionBlock.n_nested: 1, - OptionBlock.vars: dict( - [("nummaxwell", OptionBlock.simple_int)] - ), + OptionBlock.vars: dict([("nummaxwell", OptionBlock.simple_int)]), }, ), ("tabfiles", OptionBlock.simple_tabfile), @@ -155,9 +153,7 @@ class ModflowAg(Package): OptionBlock.dtype: np.bool_, OptionBlock.nested: True, OptionBlock.n_nested: 1, - OptionBlock.vars: dict( - [("unit_welllist", OptionBlock.simple_int)] - ), + OptionBlock.vars: dict([("unit_welllist", OptionBlock.simple_int)]), }, ), ( @@ -188,9 +184,7 @@ class ModflowAg(Package): OptionBlock.dtype: np.bool_, OptionBlock.nested: True, OptionBlock.n_nested: 1, - OptionBlock.vars: dict( - [("unitcbc", OptionBlock.simple_int)] - ), + OptionBlock.vars: dict([("unitcbc", OptionBlock.simple_int)]), }, ), ] @@ -211,9 +205,7 @@ def __init__( nper=0, ): if "nwt" not in model.version: - raise AssertionError( - "Model version must be mfnwt to use the AG package" - ) + raise AssertionError("Model version must be mfnwt to use the AG package") # setup the package parent class if unitnumber is None: @@ -357,9 +349,7 @@ def write_file(self, check=False): foo.write("TIME SERIES \n") 
for record in self.time_series: if record["keyword"] in ("welletall", "wellall"): - foo.write( - f"{record['keyword']} {record['unit']}\n".upper() - ) + foo.write(f"{record['keyword']} {record['unit']}\n".upper()) else: foo.write(fmt.format(*record).upper()) @@ -450,9 +440,7 @@ def write_file(self, check=False): ) else: foo.write( - fmt20.format( - rec["segid"], rec["numcell"] - ) + fmt20.format(rec["segid"], rec["numcell"]) ) for i in range(num): @@ -503,9 +491,7 @@ def write_file(self, check=False): ) else: foo.write( - fmt24.format( - rec["wellid"] + 1, rec["numcell"] - ) + fmt24.format(rec["wellid"] + 1, rec["numcell"]) ) for i in range(num): @@ -540,9 +526,7 @@ def write_file(self, check=False): num = rec["numcell"] foo.write( - fmt28.format( - rec["wellid"] + 1, rec["numcell"] - ) + fmt28.format(rec["wellid"] + 1, rec["numcell"]) ) for i in range(num): diff --git a/flopy/modflow/mfbas.py b/flopy/modflow/mfbas.py index 66433fd3dd..8596696784 100644 --- a/flopy/modflow/mfbas.py +++ b/flopy/modflow/mfbas.py @@ -191,9 +191,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): neighbors = chk.get_neighbors(self.ibound.array) if isinstance(neighbors, np.ndarray): - neighbors[np.isnan(neighbors)] = ( - 0 # set neighbors at edges to 0 (inactive) - ) + neighbors[np.isnan(neighbors)] = 0 # set neighbors at edges to 0 (inactive) chk.values( self.ibound.array, (self.ibound.array > 0) & np.all(neighbors < 1, axis=0), diff --git a/flopy/modflow/mfbcf.py b/flopy/modflow/mfbcf.py index 83d676448c..6060314405 100644 --- a/flopy/modflow/mfbcf.py +++ b/flopy/modflow/mfbcf.py @@ -282,9 +282,7 @@ def write_file(self, f=None): f_bcf.write(self.vcont[k].get_file_entry()) if transient and ((self.laycon[k] == 2) or (self.laycon[k] == 3)): f_bcf.write(self.sf2[k].get_file_entry()) - if (self.iwdflg != 0) and ( - (self.laycon[k] == 1) or (self.laycon[k] == 3) - ): + if (self.iwdflg != 0) and ((self.laycon[k] == 1) or (self.laycon[k] == 3)): f_bcf.write(self.wetdry[k].get_file_entry()) f_bcf.close() @@ -402,9 +400,7 @@ def load(cls, f, model, ext_unit_dict=None): # TRPY array if model.verbose: print(" loading TRPY...") - trpy = Util2d.load( - f, model, (nlay,), np.float32, "trpy", ext_unit_dict - ) + trpy = Util2d.load(f, model, (nlay,), np.float32, "trpy", ext_unit_dict) # property data for each layer based on options transient = not dis.steady.all() @@ -447,9 +443,7 @@ def load(cls, f, model, ext_unit_dict=None): else: if model.verbose: print(f" loading hy layer {k + 1:3d}...") - t = Util2d.load( - f, model, (nrow, ncol), np.float32, "hy", ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.float32, "hy", ext_unit_dict) hy[k] = t # vcont @@ -490,9 +484,7 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowBcf._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # create instance of bcf object diff --git a/flopy/modflow/mfchd.py b/flopy/modflow/mfchd.py index 530b606da0..3ed45aebbe 100644 --- a/flopy/modflow/mfchd.py +++ b/flopy/modflow/mfchd.py @@ -130,9 +130,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = MfList(self, stress_period_data) self.np = 0 diff --git a/flopy/modflow/mfdis.py b/flopy/modflow/mfdis.py 
index bd6b9903df..cb1c103178 100644 --- a/flopy/modflow/mfdis.py +++ b/flopy/modflow/mfdis.py @@ -184,9 +184,7 @@ def __init__( # Set values of all parameters self._generate_heading() - self.laycbd = Util2d( - model, (self.nlay,), np.int32, laycbd, name="laycbd" - ) + self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, name="laycbd") self.laycbd[-1] = 0 # bottom layer must be zero self.delr = Util2d( model, @@ -220,13 +218,9 @@ def __init__( "botm", locat=self.unit_number[0], ) - self.perlen = Util2d( - model, (self.nper,), np.float32, perlen, name="perlen" - ) + self.perlen = Util2d(model, (self.nper,), np.float32, perlen, name="perlen") self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name="nstp") - self.tsmult = Util2d( - model, (self.nper,), np.float32, tsmult, name="tsmult" - ) + self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, name="tsmult") self.steady = Util2d(model, (self.nper,), bool, steady, name="steady") try: @@ -279,9 +273,7 @@ def __init__( if start_datetime is None: start_datetime = model._start_datetime - self.tr = TemporalReference( - itmuni=self.itmuni, start_datetime=start_datetime - ) + self.tr = TemporalReference(itmuni=self.itmuni, start_datetime=start_datetime) self.start_datetime = start_datetime self._totim = None @@ -396,9 +388,7 @@ def get_kstp_kper_toffset(self, t=0.0, use_cached_totim=False): break return kstp, kper, toffset - def get_totim_from_kper_toffset( - self, kper=0, toffset=0.0, use_cached_totim=False - ): + def get_totim_from_kper_toffset(self, kper=0, toffset=0.0, use_cached_totim=False): """ Get totim from a passed kper and time offset from the beginning of a stress period @@ -425,9 +415,7 @@ def get_totim_from_kper_toffset( if kper < 0: kper = 0.0 if kper >= self.nper: - raise ValueError( - f"kper ({kper}) must be less than to nper ({self.nper})." 
- ) + raise ValueError(f"kper ({kper}) must be less than nper ({self.nper}).") totim = self.get_totim(use_cached_totim) nstp = self.nstp.array @@ -648,9 +636,7 @@ def write_file(self, check=True): # Item 6: NPER, NSTP, TSMULT, Ss/tr for t in range(self.nper): - f_dis.write( - f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} " - ) + f_dis.write(f"{self.perlen[t]:14f}{self.nstp[t]:14d}{self.tsmult[t]:10f} ") if self.steady[t]: f_dis.write(" SS\n") else: @@ -808,21 +794,15 @@ def load(cls, f, model, ext_unit_dict=None, check=True): # dataset 3 -- delr if model.verbose: print(" loading delr...") - delr = Util2d.load( - f, model, (ncol,), np.float32, "delr", ext_unit_dict - ) + delr = Util2d.load(f, model, (ncol,), np.float32, "delr", ext_unit_dict) # dataset 4 -- delc if model.verbose: print(" loading delc...") - delc = Util2d.load( - f, model, (nrow,), np.float32, "delc", ext_unit_dict - ) + delc = Util2d.load(f, model, (nrow,), np.float32, "delc", ext_unit_dict) # dataset 5 -- top if model.verbose: print(" loading top...") - top = Util2d.load( - f, model, (nrow, ncol), np.float32, "top", ext_unit_dict - ) + top = Util2d.load(f, model, (nrow, ncol), np.float32, "top", ext_unit_dict) # dataset 6 -- botm ncbd = laycbd.sum() if model.verbose: diff --git a/flopy/modflow/mfdrn.py b/flopy/modflow/mfdrn.py index 1639457340..7a57ed5be1 100644 --- a/flopy/modflow/mfdrn.py +++ b/flopy/modflow/mfdrn.py @@ -251,9 +251,7 @@ def add_record(self, kper, index, values): @staticmethod def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): # get an empty recarray that corresponds to dtype - dtype = ModflowDrn.get_default_dtype( - structured=structured, is_drt=is_drt - ) + dtype = ModflowDrn.get_default_dtype(structured=structured, is_drt=is_drt) if aux_names is not None: dtype = Package.add_to_dtype(dtype, aux_names, np.float32) return create_empty_recarray(ncells, dtype, default_value=-1.0e10) diff --git a/flopy/modflow/mfdrt.py b/flopy/modflow/mfdrt.py index c4988afefa..af507d5e3f 100644 --- a/flopy/modflow/mfdrt.py +++ b/flopy/modflow/mfdrt.py @@ -153,9 +153,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = MfList(self, stress_period_data) self.parent.add_package(self) diff --git a/flopy/modflow/mfevt.py b/flopy/modflow/mfevt.py index f3e92f3f0a..7e469422fb 100644 --- a/flopy/modflow/mfevt.py +++ b/flopy/modflow/mfevt.py @@ -132,18 +132,10 @@ def __init__( exdp_u2d_shape = get_pak_vals_shape(model, exdp) ievt_u2d_shape = get_pak_vals_shape(model, ievt) - self.surf = Transient2d( - model, surf_u2d_shape, np.float32, surf, name="surf" - ) - self.evtr = Transient2d( - model, evtr_u2d_shape, np.float32, evtr, name="evtr" - ) - self.exdp = Transient2d( - model, exdp_u2d_shape, np.float32, exdp, name="exdp" - ) - self.ievt = Transient2d( - model, ievt_u2d_shape, np.int32, ievt, name="ievt" - ) + self.surf = Transient2d(model, surf_u2d_shape, np.float32, surf, name="surf") + self.evtr = Transient2d(model, evtr_u2d_shape, np.float32, evtr, name="evtr") + self.exdp = Transient2d(model, exdp_u2d_shape, np.float32, exdp, name="exdp") + self.ievt = Transient2d(model, ievt_u2d_shape, np.int32, ievt, name="ievt") self.np = 0 self.parent.add_package(self) @@ -190,10 +182,7 @@ def write_file(self, f=None): ) if not self.parent.structured: mxndevt = np.max( - [ - u2d.array.size - for kper, u2d in
self.ievt.transient_2ds.items() - ] + [u2d.array.size for kper, u2d in self.ievt.transient_2ds.items()] ) f_evt.write(f"{mxndevt:10d}\n") @@ -274,9 +263,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): npar = int(raw[1]) if npar > 0: if model.verbose: - print( - " Parameters detected. Number of parameters = ", npar - ) + print(" Parameters detected. Number of parameters = ", npar) line = f.readline() # Dataset 2 t = line.strip().split() @@ -327,18 +314,14 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): if insurf >= 0: if model.verbose: print(f" loading surf stress period {iper + 1:3d}...") - t = Util2d.load( - f, model, u2d_shape, np.float32, "surf", ext_unit_dict - ) + t = Util2d.load(f, model, u2d_shape, np.float32, "surf", ext_unit_dict) current_surf = t surf[iper] = current_surf if inevtr >= 0: if npar == 0: if model.verbose: - print( - f" loading evtr stress period {iper + 1:3d}..." - ) + print(f" loading evtr stress period {iper + 1:3d}...") t = Util2d.load( f, model, @@ -366,26 +349,20 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): except: iname = "static" parm_dict[pname] = iname - t = mfparbc.parameter_bcfill( - model, u2d_shape, parm_dict, pak_parms - ) + t = mfparbc.parameter_bcfill(model, u2d_shape, parm_dict, pak_parms) current_evtr = t evtr[iper] = current_evtr if inexdp >= 0: if model.verbose: print(f" loading exdp stress period {iper + 1:3d}...") - t = Util2d.load( - f, model, u2d_shape, np.float32, "exdp", ext_unit_dict - ) + t = Util2d.load(f, model, u2d_shape, np.float32, "exdp", ext_unit_dict) current_exdp = t exdp[iper] = current_exdp if nevtop == 2: if inievt >= 0: if model.verbose: - print( - f" loading ievt stress period {iper + 1:3d}..." - ) + print(f" loading ievt stress period {iper + 1:3d}...") t = Util2d.load( f, model, u2d_shape, np.int32, "ievt", ext_unit_dict ) @@ -419,9 +396,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): ext_unit_dict, filetype=ModflowEvt._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # set args for unitnumber and filenames diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py index 7e68a08be4..259caa8ba7 100644 --- a/flopy/modflow/mffhb.py +++ b/flopy/modflow/mffhb.py @@ -237,7 +237,7 @@ def __init__( # perform some simple verification if len(self.bdtime) != self.nbdtim: raise ValueError( - "bdtime has {} entries but requires " "{} entries.".format( + "bdtime has {} entries but requires {} entries.".format( len(self.bdtime), self.nbdtim ) ) @@ -250,7 +250,7 @@ def __init__( if self.ds5.shape[0] != self.nflw: raise ValueError( - "dataset 5 has {} rows but requires " "{} rows.".format( + "dataset 5 has {} rows but requires {} rows.".format( self.ds5.shape[0], self.nflw ) ) @@ -261,8 +261,9 @@ def __init__( nc += 2 if len(self.ds5.dtype.names) != nc: raise ValueError( - "dataset 5 has {} columns but requires " - "{} columns.".format(len(self.ds5.dtype.names), nc) + "dataset 5 has {} columns but requires {} columns.".format( + len(self.ds5.dtype.names), nc + ) ) if self.nhed > 0: @@ -272,7 +273,7 @@ def __init__( ) if self.ds7.shape[0] != self.nhed: raise ValueError( - "dataset 7 has {} rows but requires " "{} rows.".format( + "dataset 7 has {} rows but requires {} rows.".format( self.ds7.shape[0], self.nhed ) ) @@ -283,8 +284,9 @@ def __init__( nc += 2 if len(self.ds7.dtype.names) != nc: raise ValueError( - "dataset 7 has {} 
columns but requires " - "{} columns.".format(len(self.ds7.dtype.names), nc) + "dataset 7 has {} columns but requires {} columns.".format( + len(self.ds7.dtype.names), nc + ) ) self.parent.add_package(self) @@ -570,10 +572,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): for naux in range(nfhbx1): if model.verbose: print(f"loading fhb dataset 6a - aux {naux + 1}") - print( - "dataset 6a will not be preserved in " - "the created fhb object." - ) + print("dataset 6a will not be preserved in the created fhb object.") # Dataset 6a IFHBUN CNSTM IFHBPT line = f.readline() raw = line.strip().split() @@ -589,10 +588,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): if model.verbose: print(f"loading fhb dataset 6b - aux {naux + 1}") - print( - "dataset 6b will not be preserved in " - "the created fhb object." - ) + print("dataset 6b will not be preserved in the created fhb object.") current = np.recarray(nflw, dtype=dtype) for n in range(nflw): ds6b = read1d(f, np.zeros((nbdtim,))) @@ -647,10 +643,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): for naux in range(nfhbx1): if model.verbose: print(f"loading fhb dataset 8a - aux {naux + 1}") - print( - "dataset 8a will not be preserved in " - "the created fhb object." - ) + print("dataset 8a will not be preserved in the created fhb object.") # Dataset 6a IFHBUN CNSTM IFHBPT line = f.readline() raw = line.strip().split() @@ -667,10 +660,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): if model.verbose: print(f"loading fhb dataset 8b - aux {naux + 1}") - print( - "dataset 8b will not be preserved in " - "the created fhb object." - ) + print("dataset 8b will not be preserved in the created fhb object.") current = np.recarray(nflw, dtype=dtype) for n in range(nhed): ds8b = read1d(f, np.zeros((nbdtim,))) @@ -688,9 +678,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): ext_unit_dict, filetype=ModflowFhb._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # auxiliary data are not passed to load instantiation diff --git a/flopy/modflow/mfflwob.py b/flopy/modflow/mfflwob.py index 1513ed3f00..fc4b2b0bb9 100644 --- a/flopy/modflow/mfflwob.py +++ b/flopy/modflow/mfflwob.py @@ -231,18 +231,10 @@ def __init__( self.factor = factor # -create empty arrays of the correct size - self.layer = np.zeros( - (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32" - ) - self.row = np.zeros( - (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32" - ) - self.column = np.zeros( - (self.nqfb, max(np.abs(self.nqclfb))), dtype="int32" - ) - self.factor = np.zeros( - (self.nqfb, max(np.abs(self.nqclfb))), dtype="float32" - ) + self.layer = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32") + self.row = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32") + self.column = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="int32") + self.factor = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), dtype="float32") self.nqobfb = np.zeros((self.nqfb), dtype="int32") self.nqclfb = np.zeros((self.nqfb), dtype="int32") self.irefsp = np.zeros((self.nqtfb), dtype="int32") @@ -503,9 +495,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): ext_unit_dict, filetype=ftype.upper() ) if iufbobsv > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=iufbobsv - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iufbobsv) 
model.add_pop_key_list(iufbobsv) # create ModflowFlwob object instance diff --git a/flopy/modflow/mfgage.py b/flopy/modflow/mfgage.py index 1b7c4b76d0..588e6dbd1f 100644 --- a/flopy/modflow/mfgage.py +++ b/flopy/modflow/mfgage.py @@ -131,9 +131,7 @@ def __init__( # convert gage_data to a recarray, if necessary if isinstance(gage_data, np.ndarray): if not gage_data.dtype == dtype: - gage_data = np.rec.fromarrays( - gage_data.transpose(), dtype=dtype - ) + gage_data = np.rec.fromarrays(gage_data.transpose(), dtype=dtype) elif isinstance(gage_data, pd.DataFrame): gage_data = gage_data.to_records(index=False) elif isinstance(gage_data, list): @@ -159,8 +157,7 @@ def __init__( gage_data = d else: raise Exception( - "gage_data must be a numpy record array, numpy array " - "or a list" + "gage_data must be a numpy record array, numpy array or a list" ) # add gage output files to model @@ -347,9 +344,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): for key, value in ext_unit_dict.items(): if key == abs(iu): model.add_pop_key_list(abs(iu)) - relpth = os.path.relpath( - value.filename, model.model_ws - ) + relpth = os.path.relpath(value.filename, model.model_ws) files.append(relpth) break diff --git a/flopy/modflow/mfghb.py b/flopy/modflow/mfghb.py index 33b9e99fd7..af0ab4102c 100644 --- a/flopy/modflow/mfghb.py +++ b/flopy/modflow/mfghb.py @@ -148,9 +148,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = MfList(self, stress_period_data) def _ncells(self): diff --git a/flopy/modflow/mfgmg.py b/flopy/modflow/mfgmg.py index 9987e42f40..14fe8796ad 100644 --- a/flopy/modflow/mfgmg.py +++ b/flopy/modflow/mfgmg.py @@ -272,13 +272,9 @@ def write_file(self): f_gmg = open(self.fn_path, "w") f_gmg.write(f"{self.heading}\n") # dataset 0 - f_gmg.write( - f"{self.rclose} {self.iiter} {self.hclose} {self.mxiter}\n" - ) + f_gmg.write(f"{self.rclose} {self.iiter} {self.hclose} {self.mxiter}\n") # dataset 1 - f_gmg.write( - f"{self.damp} {self.iadamp} {self.ioutgmg} {self.iunitmhc}\n" - ) + f_gmg.write(f"{self.damp} {self.iadamp} {self.ioutgmg} {self.iunitmhc}\n") # dataset 2 f_gmg.write(f"{self.ism} {self.isc} ") if self.iadamp == 2: @@ -377,9 +373,7 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowGmg._ftype() ) if iunitmhc > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=iunitmhc - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iunitmhc) model.add_pop_key_list(iunitmhc) return cls( diff --git a/flopy/modflow/mfhfb.py b/flopy/modflow/mfhfb.py index 6d9479c132..e018f54d4b 100644 --- a/flopy/modflow/mfhfb.py +++ b/flopy/modflow/mfhfb.py @@ -203,9 +203,7 @@ def write_file(self): ) ) else: - f_hfb.write( - "{:10d}{:10d}{:13.6g}\n".format(a[0] + 1, a[1] + 1, a[2]) - ) + f_hfb.write("{:10d}{:10d}{:13.6g}\n".format(a[0] + 1, a[1] + 1, a[2])) f_hfb.write(f"{self.nacthfb:10d}") f_hfb.close() @@ -385,9 +383,7 @@ def load(cls, f, model, ext_unit_dict=None): # fill current parameter data (par_current) for ibnd, t in enumerate(data_dict): t = tuple(t) - par_current[ibnd] = tuple( - t[: len(par_current.dtype.names)] - ) + par_current[ibnd] = tuple(t[: len(par_current.dtype.names)]) # convert indices to zero-based if structured: diff --git a/flopy/modflow/mfhob.py b/flopy/modflow/mfhob.py index 261e15a0fa..c2761d72d4 100644 --- a/flopy/modflow/mfhob.py +++ 
b/flopy/modflow/mfhob.py @@ -614,9 +614,7 @@ def __init__( raise ValueError( "sum of dataset 4 proportions must equal 1.0 - " "sum of dataset 4 proportions = {tot} for " - "observation name {obsname}.".format( - tot=tot, obsname=self.obsname - ) + "observation name {obsname}.".format(tot=tot, obsname=self.obsname) ) # convert passed time_series_data to a numpy array @@ -652,8 +650,7 @@ def __init__( names = [names] elif not isinstance(names, list): raise ValueError( - "HeadObservation names must be a " - "string or a list of strings" + "HeadObservation names must be a string or a list of strings" ) if len(names) < self.nobs: raise ValueError( diff --git a/flopy/modflow/mfhyd.py b/flopy/modflow/mfhyd.py index 29af549886..56e9da4c94 100644 --- a/flopy/modflow/mfhyd.py +++ b/flopy/modflow/mfhyd.py @@ -333,9 +333,7 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowHyd._ftype() ) if ihydun > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ihydun - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ihydun) model.add_pop_key_list(ihydun) # return hyd instance diff --git a/flopy/modflow/mflak.py b/flopy/modflow/mflak.py index 1491119e7f..dd686441ab 100644 --- a/flopy/modflow/mflak.py +++ b/flopy/modflow/mflak.py @@ -376,7 +376,7 @@ def __init__( if self.dis.steady[0]: if stage_range.shape != (nlakes, 2): raise Exception( - "stages shape should be ({},2) but is only " "{}.".format( + "stages shape should be ({},2) but is only {}.".format( nlakes, stage_range.shape ) ) @@ -514,15 +514,9 @@ def write_file(self): if self.tabdata: ipos.append(5) t.append(self.iunit_tab[n]) - f.write( - write_fixed_var( - t, ipos=ipos, free=self.parent.free_format_input - ) - ) + f.write(write_fixed_var(t, ipos=ipos, free=self.parent.free_format_input)) - ds8_keys = ( - list(self.sill_data.keys()) if self.sill_data is not None else [] - ) + ds8_keys = list(self.sill_data.keys()) if self.sill_data is not None else [] ds9_keys = list(self.flux_data.keys()) nper = self.dis.steady.shape[0] for kper in range(nper): @@ -541,9 +535,7 @@ def write_file(self): t = [itmp, itmp2, tmplwrt] comment = f"Stress period {kper + 1}" f.write( - write_fixed_var( - t, free=self.parent.free_format_input, comment=comment - ) + write_fixed_var(t, free=self.parent.free_format_input, comment=comment) ) if itmp > 0: @@ -730,9 +722,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): lwrt = [] for iper in range(nper): if model.verbose: - print( - f" reading lak dataset 4 - for stress period {iper + 1}" - ) + print(f" reading lak dataset 4 - for stress period {iper + 1}") line = f.readline().rstrip() if model.array_free_format: t = line.split() @@ -743,17 +733,13 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): if itmp > 0: if model.verbose: - print( - f" reading lak dataset 5 - for stress period {iper + 1}" - ) + print(f" reading lak dataset 5 - for stress period {iper + 1}") name = f"LKARR_StressPeriod_{iper}" lakarr = Util3d.load( f, model, (nlay, nrow, ncol), np.int32, name, ext_unit_dict ) if model.verbose: - print( - f" reading lak dataset 6 - for stress period {iper + 1}" - ) + print(f" reading lak dataset 6 - for stress period {iper + 1}") name = f"BDLKNC_StressPeriod_{iper}" bdlknc = Util3d.load( f, @@ -768,9 +754,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): lake_lknc[iper] = bdlknc if model.verbose: - print( - f" reading lak dataset 7 - for stress period {iper + 1}" - ) + print(f" reading lak dataset 7 - for stress period {iper + 1}") 
line = f.readline().rstrip() t = line.split() nslms = int(t[0]) @@ -803,9 +787,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): sill_data[iper] = ds8 if itmp1 >= 0: if model.verbose: - print( - f" reading lak dataset 9 - for stress period {iper + 1}" - ) + print(f" reading lak dataset 9 - for stress period {iper + 1}") ds9 = {} for n in range(nlakes): line = f.readline().rstrip() @@ -853,9 +835,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): ext_unit_dict, filetype=ModflowLak._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) ipos = 2 diff --git a/flopy/modflow/mflpf.py b/flopy/modflow/mflpf.py index 8dd6fd33b5..9c0412bd30 100644 --- a/flopy/modflow/mflpf.py +++ b/flopy/modflow/mflpf.py @@ -237,9 +237,7 @@ def __init__( nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper # item 1 - self.hdry = ( - hdry # Head in cells that are converted to dry during a simulation - ) + self.hdry = hdry # Head in cells that are converted to dry during a simulation self.nplpf = 0 # number of LPF parameters self.ikcflag = 0 # 1 and -1 are not supported. self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name="laytyp") @@ -568,9 +566,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if model.verbose: print(f" loading hk layer {k + 1:3d}...") if "hk" not in par_types: - t = Util2d.load( - f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict) else: line = f.readline() t = mfpar.parameter_fill( @@ -605,9 +601,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if layvka[k] != 0: key = "vani" if "vk" not in par_types and "vani" not in par_types: - t = Util2d.load( - f, model, (nrow, ncol), np.float32, key, ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.float32, key, ext_unit_dict) else: line = f.readline() key = "vk" @@ -694,9 +688,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): ext_unit_dict, filetype=ModflowLpf._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # create instance of lpf class diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py index 2f3fa75d52..a141b3ee54 100644 --- a/flopy/modflow/mfmnw1.py +++ b/flopy/modflow/mfmnw1.py @@ -117,22 +117,16 @@ def __init__( self.url = "mnw.html" self.nper = self.parent.nrow_ncol_nlay_nper[-1] self._generate_heading() - self.mxmnw = ( - mxmnw # -maximum number of multi-node wells to be simulated - ) + self.mxmnw = mxmnw # -maximum number of multi-node wells to be simulated self.iwelpt = iwelpt # -verbosity flag self.nomoiter = nomoiter # -integer indicating the number of iterations for which flow in MNW wells is calculated self.kspref = kspref # -alphanumeric key indicating which set of water levels are to be used as reference values for calculating drawdown - self.losstype = ( - losstype # -string indicating head loss type for each well - ) + self.losstype = losstype # -string indicating head loss type for each well self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. 
[['test.ByNode',92,'ALLTIME']] if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = MfList(self, stress_period_data) self.mnwname = mnwname # -string prefix name of file for outputting time series data from MNW1 @@ -298,9 +292,7 @@ def write_file(self): for each in self.wel1_bynode_qsum: if each[0].split(".")[1].lower() == "bynode": if len(each) == 2: - f.write( - "FILE:%s BYNODE:%-10i\n" % (each[0], int(each[1])) - ) + f.write("FILE:%s BYNODE:%-10i\n" % (each[0], int(each[1]))) elif len(each) == 3: f.write( "FILE:%s BYNODE:%-10i %s\n" @@ -311,13 +303,10 @@ def write_file(self): for each in self.wel1_bynode_qsum: if each[0].split(".")[1].lower() == "qsum": if len(each) == 2: - f.write( - "FILE:%s QSUM:%-10i\n" % (each[0], int(each[1])) - ) + f.write("FILE:%s QSUM:%-10i\n" % (each[0], int(each[1]))) elif len(each) == 3: f.write( - "FILE:%s QSUM:%-10i %s\n" - % (each[0], int(each[1]), each[2]) + "FILE:%s QSUM:%-10i %s\n" % (each[0], int(each[1]), each[2]) ) spd = self.stress_period_data.drop("mnw_no") @@ -384,9 +373,7 @@ def getitem(line, txt): return items -def _parse_5( - f, itmp, qfrcmn_default=None, qfrcmx_default=None, qcut_default="" -): +def _parse_5(f, itmp, qfrcmn_default=None, qfrcmx_default=None, qcut_default=""): data = [] mnw_no = 0 mn = False diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py index 1ba0214968..643a6f6308 100644 --- a/flopy/modflow/mfmnw2.py +++ b/flopy/modflow/mfmnw2.py @@ -492,9 +492,7 @@ def make_node_data(self): """ nnodes = self.nnodes - node_data = ModflowMnw2.get_empty_node_data( - np.abs(nnodes), aux_names=self.aux - ) + node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes), aux_names=self.aux) names = Mnw.get_item2_names(self) for n in names: @@ -563,8 +561,7 @@ def get_default_spd_dtype(structured=True): ) else: raise NotImplementedError( - "Mnw2: get_default_spd_dtype not implemented for " - "unstructured grids" + "Mnw2: get_default_spd_dtype not implemented for unstructured grids" ) @staticmethod @@ -821,9 +818,7 @@ def _getloc(n): def _getloc(n): """Output for dataset 2d2.""" - fmt = ( - indent + "{0} {0} ".format(float_format) + "{:.0f} {:.0f}" - ) + fmt = indent + "{0} {0} ".format(float_format) + "{:.0f} {:.0f}" return fmt.format( self.node_data.ztop[n], self.node_data.zbotm[n], @@ -863,9 +858,7 @@ def _getloc(n): # dataset 2g if self.pumpcap > 0: fmt = indent + "{0} {0} {0} {0}\n".format(float_format) - f_mnw.write( - fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol) - ) + f_mnw.write(fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol)) # dataset 2h if self.pumpcap > 0: fmt = indent + "{0} {0}\n".format(float_format) @@ -1042,18 +1035,12 @@ def __init__( if node_data is not None: if isinstance(node_data, pd.DataFrame): node_data = node_data.to_records(index=False) - self.node_data = self.get_empty_node_data( - len(node_data), aux_names=aux - ) + self.node_data = self.get_empty_node_data(len(node_data), aux_names=aux) names = [ - n - for n in node_data.dtype.names - if n in self.node_data.dtype.names + n for n in node_data.dtype.names if n in self.node_data.dtype.names ] for n in names: - self.node_data[n] = node_data[ - n - ] # recarray of Mnw properties by node + self.node_data[n] = node_data[n] # recarray of Mnw properties by node self.nodtot = len(self.node_data) self._sort_node_data() @@ -1070,15 +1057,11 @@ def __init__( ) if 
stress_period_data is not None: stress_period_data = { - per: sp.to_records(index=False) - if isinstance(sp, pd.DataFrame) - else sp + per: sp.to_records(index=False) if isinstance(sp, pd.DataFrame) else sp for per, sp in stress_period_data.items() } for per, data in stress_period_data.items(): - spd = ModflowMnw2.get_empty_stress_period_data( - len(data), aux_names=aux - ) + spd = ModflowMnw2.get_empty_stress_period_data(len(data), aux_names=aux) names = [n for n in data.dtype.names if n in spd.dtype.names] for n in names: spd[n] = data[n] @@ -1155,9 +1138,7 @@ def get_empty_node_data( dtype = ModflowMnw2.get_default_node_dtype(structured=structured) if aux_names is not None: dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray( - maxnodes, dtype, default_value=default_value - ) + return create_empty_recarray(maxnodes, dtype, default_value=default_value) @staticmethod def get_default_node_dtype(structured=True): @@ -1348,9 +1329,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): ) mnw[mnwobj.wellid] = mnwobj # master table with all node data - node_data = np.append(node_data, mnwobj.node_data).view( - np.recarray - ) + node_data = np.append(node_data, mnwobj.node_data).view(np.recarray) stress_period_data = {} # stress period data table for package (flopy convention) itmp = [] @@ -1369,9 +1348,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): ) hlim, qcut, qfrcmn, qfrcmx = 0, 0, 0, 0 if mnw[wellid].qlimit < 0: - hlim, qcut, qfrcmn, qfrcmx = _parse_4b( - get_next_line(f) - ) + hlim, qcut, qfrcmn, qfrcmx = _parse_4b(get_next_line(f)) # update package stress period data table ndw = node_data[node_data.wellid == wellid] kij = [ndw.k[0], ndw.i[0], ndw.j[0]] @@ -1401,9 +1378,9 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): pass else: # copy pumping rates from previous stress period - mnw[wellid].stress_period_data[per] = mnw[ - wellid - ].stress_period_data[per - 1] + mnw[wellid].stress_period_data[per] = mnw[wellid].stress_period_data[ + per - 1 + ] itmp.append(itmp_per) if openfile: @@ -1547,9 +1524,7 @@ def make_mnw_objects(self): for wellid in mnws: nd = node_data[node_data.wellid == wellid] nnodes = Mnw.get_nnodes(nd) - mnwspd = Mnw.get_empty_stress_period_data( - self.nper, aux_names=self.aux - ) + mnwspd = Mnw.get_empty_stress_period_data(self.nper, aux_names=self.aux) for per, itmp in enumerate(self.itmp): inds = stress_period_data[per].wellid == wellid if itmp > 0 and np.any(inds): @@ -1625,10 +1600,8 @@ def make_stress_period_data(self, mnwobjs): stress_period_data = {} for per, itmp in enumerate(self.itmp): if itmp > 0: - stress_period_data[per] = ( - ModflowMnw2.get_empty_stress_period_data( - itmp, aux_names=self.aux - ) + stress_period_data[per] = ModflowMnw2.get_empty_stress_period_data( + itmp, aux_names=self.aux ) i = 0 for mnw in mnwobjs: @@ -1643,9 +1616,9 @@ def make_stress_period_data(self, mnwobjs): ] stress_period_data[per]["wellid"][i - 1] = mnw.wellid for n in names: - stress_period_data[per][n][i - 1] = ( - mnw.stress_period_data[n][per] - ) + stress_period_data[per][n][i - 1] = mnw.stress_period_data[ + n + ][per] stress_period_data[per].sort(order="wellid") if i < itmp: raise ItmpError(itmp, i) @@ -1688,9 +1661,9 @@ def export(self, f, **kwargs): for per in self.stress_period_data.data.keys(): for col in todrop: inds = self.stress_period_data[per].wellid == wellid - self.stress_period_data[per][col][inds] = ( - self.node_data[wellnd][col] - ) 
+ self.stress_period_data[per][col][inds] = self.node_data[ + wellnd + ][col] self.node_data_MfList = self.node_data_MfList.drop(todrop) """ todrop = {'qfrcmx', 'qfrcmn'} @@ -1731,9 +1704,7 @@ def _write_1(self, f_mnw): f_mnw.write(f" aux {abc}") f_mnw.write("\n") - def write_file( - self, filename=None, float_format=" {:15.7E}", use_tables=True - ): + def write_file(self, filename=None, float_format=" {:15.7E}", use_tables=True): """ Write the package file. @@ -1768,9 +1739,7 @@ def write_file( # need a method that assigns attributes from table to objects! # call make_mnw_objects?? (table is definitive then) if use_tables: - mnws = np.unique( - self.node_data.wellid - ).tolist() # preserve any order + mnws = np.unique(self.node_data.wellid).tolist() # preserve any order else: mnws = self.mnw.values() for k in mnws: @@ -1789,36 +1758,28 @@ def write_file( if self.mnw[wellid].pumpcap > 0: fmt = " " + float_format f_mnw.write( - fmt.format( - *self.stress_period_data[per].capmult[n] - ) + fmt.format(*self.stress_period_data[per].capmult[n]) ) if qdes > 0 and self.gwt: - f_mnw.write( - fmt.format(*self.stress_period_data[per].cprime[n]) - ) + f_mnw.write(fmt.format(*self.stress_period_data[per].cprime[n])) if len(self.aux) > 0: for var in self.aux: fmt = " " + float_format f_mnw.write( - fmt.format( - *self.stress_period_data[per][var][n] - ) + fmt.format(*self.stress_period_data[per][var][n]) ) f_mnw.write("\n") if self.mnw[wellid].qlimit < 0: - hlim, qcut = self.stress_period_data[per][ - ["hlim", "qcut"] - ][n] + hlim, qcut = self.stress_period_data[per][["hlim", "qcut"]][n] fmt = float_format + " {:.0f}" f_mnw.write(fmt.format(hlim, qcut)) if qcut != 0: fmt = " {} {}".format(float_format) f_mnw.write( fmt.format( - *self.stress_period_data[per][ - ["qfrcmn", "qfrcmx"] - ][n] + *self.stress_period_data[per][["qfrcmn", "qfrcmx"]][ + n + ] ) ) f_mnw.write("\n") @@ -1854,9 +1815,7 @@ def _parse_1(line): option = [] # aux names if len(line) > 0: option += [ - line[i] - for i in np.arange(1, len(line)) - if "aux" in line[i - 1].lower() + line[i] for i in np.arange(1, len(line)) if "aux" in line[i - 1].lower() ] return mnwmax, nodtot, ipakcb, mnwprint, option @@ -1910,9 +1869,7 @@ def _parse_2(f): d2dw = dict(zip(["rw", "rskin", "kskin", "B", "C", "P", "cwc"], [0] * 7)) if losstype.lower() != "none": # update d2dw items - d2dw.update( - _parse_2c(get_next_line(f), losstype) - ) # dict of values for well + d2dw.update(_parse_2c(get_next_line(f), losstype)) # dict of values for well for k, v in d2dw.items(): if v > 0: d2d[k].append(v) @@ -2038,9 +1995,7 @@ def _parse_2(f): ) -def _parse_2c( - line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1, cwc=-1 -): +def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1, cwc=-1): """ Parameters diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py index a0fbdb89d0..1eb1ca8dba 100644 --- a/flopy/modflow/mfmnwi.py +++ b/flopy/modflow/mfmnwi.py @@ -151,20 +151,12 @@ def __init__( self.mnwobs = mnwobs # list of lists containing wells and related information to be # output (length = [MNWOBS][4or5]) - self.wellid_unit_qndflag_qhbflag_concflag = ( - wellid_unit_qndflag_qhbflag_concflag - ) + self.wellid_unit_qndflag_qhbflag_concflag = wellid_unit_qndflag_qhbflag_concflag # -input format checks: - assert ( - self.wel1flag >= 0 - ), "WEL1flag must be greater than or equal to zero." - assert ( - self.qsumflag >= 0 - ), "QSUMflag must be greater than or equal to zero." 
- assert ( - self.byndflag >= 0 - ), "BYNDflag must be greater than or equal to zero." + assert self.wel1flag >= 0, "WEL1flag must be greater than or equal to zero." + assert self.qsumflag >= 0, "QSUMflag must be greater than or equal to zero." + assert self.byndflag >= 0, "BYNDflag must be greater than or equal to zero." if len(self.wellid_unit_qndflag_qhbflag_concflag) != self.mnwobs: print( @@ -234,22 +226,14 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): ext_unit_dict, filetype=ModflowMnwi._ftype() ) if wel1flag > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=wel1flag - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=wel1flag) if qsumflag > 0: - iu, filenames[2] = model.get_ext_dict_attr( - ext_unit_dict, unit=qsumflag - ) + iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=qsumflag) if byndflag > 0: - iu, filenames[3] = model.get_ext_dict_attr( - ext_unit_dict, unit=byndflag - ) + iu, filenames[3] = model.get_ext_dict_attr(ext_unit_dict, unit=byndflag) idx = 4 for unit in unique_units: - iu, filenames[idx] = model.get_ext_dict_attr( - ext_unit_dict, unit=unit - ) + iu, filenames[idx] = model.get_ext_dict_attr(ext_unit_dict, unit=unit) idx += 1 return cls( @@ -331,12 +315,8 @@ def write_file(self): unit = t[1] qndflag = t[2] qhbflag = t[3] - assert ( - qndflag >= 0 - ), "QNDflag must be greater than or equal to zero." - assert ( - qhbflag >= 0 - ), "QHBflag must be greater than or equal to zero." + assert qndflag >= 0, "QNDflag must be greater than or equal to zero." + assert qhbflag >= 0, "QHBflag must be greater than or equal to zero." line = f"{wellid:20s} " line += f"{unit:5d} " line += f"{qndflag:5d} " diff --git a/flopy/modflow/mfnwt.py b/flopy/modflow/mfnwt.py index 52b3e4ddb3..e859b98c90 100644 --- a/flopy/modflow/mfnwt.py +++ b/flopy/modflow/mfnwt.py @@ -403,9 +403,7 @@ def load(cls, f, model, ext_unit_dict=None): # dataset 0 -- header flines = [ - line.strip() - for line in f.readlines() - if not line.strip().startswith("#") + line.strip() for line in f.readlines() if not line.strip().startswith("#") ] if openfile: diff --git a/flopy/modflow/mfoc.py b/flopy/modflow/mfoc.py index b98f38ac71..3f73ff79c5 100644 --- a/flopy/modflow/mfoc.py +++ b/flopy/modflow/mfoc.py @@ -338,9 +338,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): if dis is None: dis = self.parent.get_package("DISU") if dis is None: - chk._add_to_summary( - "Error", package="OC", desc="DIS package not available" - ) + chk._add_to_summary("Error", package="OC", desc="DIS package not available") else: # generate possible actions expected expected_actions = [] @@ -569,9 +567,7 @@ def reset_budgetunit(self, budgetunit=None, fname=None): for pp in self.parent.packagelist: if hasattr(pp, "ipakcb"): pp.ipakcb = self.iubud - self.parent.add_output_file( - pp.ipakcb, fname=fname, package=pp.name - ) + self.parent.add_output_file(pp.ipakcb, fname=fname, package=pp.name) return @@ -689,9 +685,7 @@ def get_ocoutput_units(f, ext_unit_dict=None): return ihedun, fhead, iddnun, fddn @classmethod - def load( - cls, f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None - ): + def load(cls, f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None): """ Load an existing package. 
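# --- Illustrative sketch, not part of the patch above or below ---
# Every hunk in this patch applies the same mechanical change: a call that was
# wrapped for the old, shorter line limit is collapsed onto one line when it
# fits under the new limit. The commit message says only "longer line length";
# the 88-character figure used here is an assumption, and join_wrapped_call is
# a hypothetical helper, not flopy API.

def join_wrapped_call(wrapped: str) -> str:
    """Collapse a wrapped call onto a single line (toy version)."""
    joined = " ".join(piece.strip() for piece in wrapped.splitlines())
    # remove the spaces left where the open/close parentheses were wrapped
    return joined.replace("( ", "(").replace(" )", ")")

wrapped = (
    "iu, filenames[1] = model.get_ext_dict_attr(\n"
    "    ext_unit_dict, unit=ipakcb\n"
    ")"
)
single = join_wrapped_call(wrapped)
assert len(single) <= 88  # fits, so the patch writes it on one line
print(single)
# --- end sketch ---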
diff --git a/flopy/modflow/mfpar.py b/flopy/modflow/mfpar.py index de4cea6c4a..eb12236354 100644 --- a/flopy/modflow/mfpar.py +++ b/flopy/modflow/mfpar.py @@ -315,16 +315,12 @@ def parameter_fill(model, shape, findkey, parm_dict, findlayer=None): if mltarr.lower() == "none": mult = np.ones(shape, dtype=dtype) else: - mult = model.mfpar.mult.mult_dict[mltarr.lower()][ - :, : - ] + mult = model.mfpar.mult.mult_dict[mltarr.lower()][:, :] if zonarr.lower() == "all": cluster_data = pv * mult else: mult_save = np.copy(mult) - za = model.mfpar.zone.zone_dict[zonarr.lower()][ - :, : - ] + za = model.mfpar.zone.zone_dict[zonarr.lower()][:, :] # build a multiplier for all of the izones mult = np.zeros(shape, dtype=dtype) for iz in izones: diff --git a/flopy/modflow/mfpbc.py b/flopy/modflow/mfpbc.py index 9853c55ff6..3fed2841a7 100644 --- a/flopy/modflow/mfpbc.py +++ b/flopy/modflow/mfpbc.py @@ -91,9 +91,7 @@ def write_file(self): f_pbc.write(f"{itmp:10d}{ctmp:10d}{self.np:10d}\n") if n < len(self.layer_row_column_data): for b in a: - f_pbc.write( - f"{b[0]:10d}{b[1]:10d}{b[2]:10d}{b[3]:10d}{b[4]:10d}\n" - ) + f_pbc.write(f"{b[0]:10d}{b[1]:10d}{b[2]:10d}{b[3]:10d}{b[4]:10d}\n") if n < len(self.cosines): for d in c: f_pbc.write(f"{d[0]:10g}{d[1]:10g}{d[2]:10g}\n") diff --git a/flopy/modflow/mfpcgn.py b/flopy/modflow/mfpcgn.py index 21c4e2eff8..4775b0c226 100644 --- a/flopy/modflow/mfpcgn.py +++ b/flopy/modflow/mfpcgn.py @@ -336,16 +336,12 @@ def write_file(self): else: # dataset 1 sfmt = " {0:9d} {1:9d} {2:9.3g} {3:9.3g}\n" - line = sfmt.format( - self.iter_mo, self.iter_mi, self.close_r, self.close_h - ) + line = sfmt.format(self.iter_mo, self.iter_mi, self.close_r, self.close_h) f.write(line) # dataset 2 sfmt = " {0:9.3g} {1:9d} {2:9d} {3:9d}\n" - line = sfmt.format( - self.relax, self.ifill, self.unit_pc, self.unit_ts - ) + line = sfmt.format(self.relax, self.ifill, self.unit_pc, self.unit_ts) f.write(line) # dataset 3 @@ -516,17 +512,11 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowPcgn._ftype() ) if unit_pc > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=unit_pc - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=unit_pc) if unit_ts > 0: - iu, filenames[2] = model.get_ext_dict_attr( - ext_unit_dict, unit=unit_ts - ) + iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=unit_ts) if ipunit > 0: - iu, filenames[3] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipunit - ) + iu, filenames[3] = model.get_ext_dict_attr(ext_unit_dict, unit=ipunit) return cls( model, diff --git a/flopy/modflow/mfpks.py b/flopy/modflow/mfpks.py index 566898ff30..688098f91c 100644 --- a/flopy/modflow/mfpks.py +++ b/flopy/modflow/mfpks.py @@ -245,10 +245,7 @@ def load(cls, f, model, ext_unit_dict=None): # dataset 0 -- header - print( - " Warning: " - "load method not completed. default pks object created." - ) + print(" Warning: load method not completed. 
default pks object created.") if openfile: f.close() diff --git a/flopy/modflow/mfrch.py b/flopy/modflow/mfrch.py index 915619965c..f1279a4f94 100644 --- a/flopy/modflow/mfrch.py +++ b/flopy/modflow/mfrch.py @@ -129,13 +129,9 @@ def __init__( rech_u2d_shape = get_pak_vals_shape(model, rech) irch_u2d_shape = get_pak_vals_shape(model, irch) - self.rech = Transient2d( - model, rech_u2d_shape, np.float32, rech, name="rech_" - ) + self.rech = Transient2d(model, rech_u2d_shape, np.float32, rech, name="rech_") if self.nrchop == 2: - self.irch = Transient2d( - model, irch_u2d_shape, np.int32, irch, name="irch_" - ) + self.irch = Transient2d(model, irch_u2d_shape, np.int32, irch, name="irch_") else: self.irch = None self.np = 0 @@ -195,9 +191,7 @@ def check( active = np.ones(self.rech.array[0][0].shape, dtype=bool) # check for unusually high or low values of mean R/T - hk_package = {"UPW", "LPF"}.intersection( - set(self.parent.get_package_list()) - ) + hk_package = {"UPW", "LPF"}.intersection(set(self.parent.get_package_list())) if len(hk_package) > 0 and self.parent.structured: pkg = list(hk_package)[0] @@ -214,9 +208,7 @@ def check( ) l = 0 for i, cbd in enumerate(self.parent.dis.laycbd): - thickness[i, :, :] = self.parent.modelgrid.cell_thickness[ - l, :, : - ] + thickness[i, :, :] = self.parent.modelgrid.cell_thickness[l, :, :] if cbd > 0: l += 1 l += 1 @@ -243,12 +235,8 @@ def check( "\r Mean R/T ratio < checker warning threshold of " f"{RTmin} for {len(lessthan)} stress periods" ) - chk._add_to_summary( - type="Warning", value=R_T.min(), desc=txt - ) - chk.remove_passed( - f"Mean R/T is between {RTmin} and {RTmax}" - ) + chk._add_to_summary(type="Warning", value=R_T.min(), desc=txt) + chk.remove_passed(f"Mean R/T is between {RTmin} and {RTmax}") if len(greaterthan) > 0: txt = ( @@ -256,16 +244,10 @@ def check( f"threshold of {RTmax} for " f"{len(greaterthan)} stress periods" ) - chk._add_to_summary( - type="Warning", value=R_T.max(), desc=txt - ) - chk.remove_passed( - f"Mean R/T is between {RTmin} and {RTmax}" - ) + chk._add_to_summary(type="Warning", value=R_T.max(), desc=txt) + chk.remove_passed(f"Mean R/T is between {RTmin} and {RTmax}") elif len(lessthan) == 0 and len(greaterthan) == 0: - chk.append_passed( - f"Mean R/T is between {RTmin} and {RTmax}" - ) + chk.append_passed(f"Mean R/T is between {RTmin} and {RTmax}") # check for NRCHOP values != 3 if self.nrchop != 3: @@ -333,10 +315,7 @@ def write_file(self, check=True, f=None): ) if not self.parent.structured: mxndrch = np.max( - [ - u2d.array.size - for kper, u2d in self.irch.transient_2ds.items() - ] + [u2d.array.size for kper, u2d in self.irch.transient_2ds.items()] ) f_rch.write(f"{mxndrch:10d}\n") @@ -348,9 +327,7 @@ def write_file(self, check=True, f=None): inirch = self.rech[kper].array.size else: inirch = -1 - f_rch.write( - f"{inrech:10d}{inirch:10d} # Stress period {kper + 1}\n" - ) + f_rch.write(f"{inrech:10d}{inirch:10d} # Stress period {kper + 1}\n") if inrech >= 0: f_rch.write(file_entry_rech) if self.nrchop == 2: @@ -414,9 +391,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): npar = int(raw[1]) if npar > 0: if model.verbose: - print( - f" Parameters detected. Number of parameters = {npar}" - ) + print(f" Parameters detected. 
Number of parameters = {npar}") line = f.readline() # dataset 2 t = line_parse(line) @@ -463,9 +438,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): if inrech >= 0: if npar == 0: if model.verbose: - print( - f" loading rech stress period {iper + 1:3d}..." - ) + print(f" loading rech stress period {iper + 1:3d}...") t = Util2d.load( f, model, @@ -490,9 +463,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): except: iname = "static" parm_dict[pname] = iname - t = mfparbc.parameter_bcfill( - model, u2d_shape, parm_dict, pak_parms - ) + t = mfparbc.parameter_bcfill(model, u2d_shape, parm_dict, pak_parms) current_rech = t rech[iper] = current_rech @@ -500,9 +471,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): if nrchop == 2: if inirch >= 0: if model.verbose: - print( - f" loading irch stress period {iper + 1:3d}..." - ) + print(f" loading irch stress period {iper + 1:3d}...") t = Util2d.load( f, model, u2d_shape, np.int32, "irch", ext_unit_dict ) @@ -522,9 +491,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): ext_unit_dict, filetype=ModflowRch._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # create recharge package instance diff --git a/flopy/modflow/mfriv.py b/flopy/modflow/mfriv.py index 63e0ee155d..f8b80ed1b5 100644 --- a/flopy/modflow/mfriv.py +++ b/flopy/modflow/mfriv.py @@ -155,9 +155,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = MfList(self, stress_period_data) self.parent.add_package(self) @@ -200,11 +198,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): if isinstance(data, pd.DataFrame): data = data.to_records(index=False).astype(self.dtype) spd = data - inds = ( - (spd.k, spd.i, spd.j) - if self.parent.structured - else (spd.node) - ) + inds = (spd.k, spd.i, spd.j) if self.parent.structured else (spd.node) # check that river stage and bottom are above model cell # bottoms also checks for nan values diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index 9d5336e816..c6e1e53ae0 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -413,9 +413,7 @@ def __init__( # number of reaches, negative value is flag for unsat. 
# flow beneath streams and/or transient routing self._nstrm = ( - np.sign(nstrm) * len(reach_data) - if reach_data is not None - else nstrm + np.sign(nstrm) * len(reach_data) if reach_data is not None else nstrm ) if segment_data is not None: # segment_data is a zero-d array @@ -435,9 +433,7 @@ def __init__( self.nparseg = nparseg # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2) self._const = const if const is not None else None - self.dleak = ( - dleak # tolerance level of stream depth used in computing leakage - ) + self.dleak = dleak # tolerance level of stream depth used in computing leakage # flag; unit number for writing table of SFR output to text file self.istcb2 = istcb2 @@ -474,9 +470,7 @@ def __init__( self.reach_data[n] = reach_data[n] # assign node numbers if there are none (structured grid) - if np.diff( - self.reach_data.node - ).max() == 0 and self.parent.has_package("DIS"): + if np.diff(self.reach_data.node).max() == 0 and self.parent.has_package("DIS"): # first make kij list lrc = np.array(self.reach_data)[["k", "i", "j"]].tolist() self.reach_data["node"] = self.parent.dis.get_node(lrc) @@ -521,12 +515,7 @@ def __init__( self.reach_data["iseg"] = 1 consistent_seg_numbers = ( - len( - set(self.reach_data.iseg).difference( - set(self.graph.keys()) - ) - ) - == 0 + len(set(self.reach_data.iseg).difference(set(self.graph.keys()))) == 0 ) if not consistent_seg_numbers: warnings.warn( @@ -535,9 +524,7 @@ def __init__( # first convert any not_a_segment_values to 0 for v in self.not_a_segment_values: - self.segment_data[0].outseg[ - self.segment_data[0].outseg == v - ] = 0 + self.segment_data[0].outseg[self.segment_data[0].outseg == v] = 0 self.set_outreaches() self.channel_geometry_data = channel_geometry_data self.channel_flow_data = channel_flow_data @@ -597,9 +584,7 @@ def nstrm(self): @property def nper(self): nper = self.parent.nrow_ncol_nlay_nper[-1] - nper = ( - 1 if nper == 0 else nper - ) # otherwise iterations from 0, nper won't run + nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run return nper @property @@ -655,9 +640,7 @@ def _make_graph(self): for recarray in self.segment_data.values(): graph.update(dict(zip(recarray["nseg"], recarray["outseg"]))) - outlets = set(graph.values()).difference( - set(graph.keys()) - ) # including lakes + outlets = set(graph.values()).difference(set(graph.keys())) # including lakes graph.update({o: 0 for o in outlets if o != 0}) return graph @@ -693,9 +676,7 @@ def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.0): dtype = ModflowSfr2.get_default_segment_dtype() if aux_names is not None: dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - d = create_empty_recarray( - nsegments, dtype, default_value=default_value - ) + d = create_empty_recarray(nsegments, dtype, default_value=default_value) return d @staticmethod @@ -872,9 +853,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): # set column names, dtypes names = _get_item2_names(nstrm, reachinput, isfropt, structured) dtypes = [ - d - for d in ModflowSfr2.get_default_reach_dtype().descr - if d[0] in names + d for d in ModflowSfr2.get_default_reach_dtype().descr if d[0] in names ] lines = [] @@ -930,9 +909,9 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): # of this logic # https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/sfr.htm dataset_6b, dataset_6c = (0,) * 9, (0,) * 9 - if not ( - isfropt in [2, 3] and icalc == 1 and i > 1 - 
) and not (isfropt in [1, 2, 3] and icalc >= 2): + if not (isfropt in [2, 3] and icalc == 1 and i > 1) and not ( + isfropt in [1, 2, 3] and icalc >= 2 + ): dataset_6b = _parse_6bc( f.readline(), icalc, @@ -955,17 +934,10 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): # ATL: not sure exactly how isfropt logic functions for this # dataset 6d description suggests that this line isn't read for isfropt > 1 # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt - if ( - i == 0 - or nstrm > 0 - and not reachinput - or isfropt <= 1 - ): + if i == 0 or nstrm > 0 and not reachinput or isfropt <= 1: dataset_6d = [] for _ in range(2): - dataset_6d.append( - _get_dataset(f.readline(), [0.0] * 8) - ) + dataset_6d.append(_get_dataset(f.readline(), [0.0] * 8)) current_6d[temp_nseg] = dataset_6d if icalc == 4: nstrpts = dataset_6a[5] @@ -985,9 +957,7 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): if tabfiles and i == 0: for j in range(numtab): - segnum, numval, iunit = map( - int, f.readline().strip().split()[:3] - ) + segnum, numval, iunit = map(int, f.readline().strip().split()[:3]) tabfiles_dict[segnum] = {"numval": numval, "inuit": iunit} else: @@ -1143,9 +1113,7 @@ def assign_layers(self, adjust_botms=False, pad=1.0): print("Fixing elevation conflicts...") botm = self.parent.dis.botm.array.copy() for ib, jb in zip(below_i, below_j): - inds = (self.reach_data.i == ib) & ( - self.reach_data.j == jb - ) + inds = (self.reach_data.i == ib) & (self.reach_data.j == jb) botm[-1, ib, jb] = streambotms[inds].min() - pad l.append(botm[-1, below_i, below_j]) header += ",new_model_botm" @@ -1201,9 +1169,7 @@ def get_outlets(self, level=0, verbose=True): # use graph instead of above loop nrow = len(self.segment_data[per].nseg) - ncol = np.max( - [len(v) if v is not None else 0 for v in self.paths.values()] - ) + ncol = np.max([len(v) if v is not None else 0 for v in self.paths.values()]) all_outsegs = np.zeros((nrow, ncol), dtype=int) for i, (k, v) in enumerate(self.paths.items()): if k > 0: @@ -1239,18 +1205,14 @@ def set_outreaches(self): self.repair_outsegs() rd = self.reach_data outseg = self.graph - reach1IDs = dict( - zip(rd[rd.ireach == 1].iseg, rd[rd.ireach == 1].reachID) - ) + reach1IDs = dict(zip(rd[rd.ireach == 1].iseg, rd[rd.ireach == 1].reachID)) outreach = [] for i in range(len(rd)): # if at the end of reach data or current segment if i + 1 == len(rd) or rd.ireach[i + 1] == 1: nextseg = outseg[rd.iseg[i]] # get next segment if nextseg > 0: # current reach is not an outlet - nextrchid = reach1IDs[ - nextseg - ] # get reach 1 of next segment + nextrchid = reach1IDs[nextseg] # get reach 1 of next segment else: nextrchid = 0 else: # otherwise, it's the next reachID @@ -1258,9 +1220,7 @@ def set_outreaches(self): outreach.append(nextrchid) self.reach_data["outreach"] = outreach - def get_slopes( - self, default_slope=0.001, minimum_slope=0.0001, maximum_slope=1.0 - ): + def get_slopes(self, default_slope=0.001, minimum_slope=0.0001, maximum_slope=1.0): """ Compute slopes by reach using values in strtop (streambed top) and rchlen (reach length) columns of reach_data. 
The slope for a @@ -1374,20 +1334,14 @@ def get_variable_by_stress_period(self, varname): isvar = all_data.sum(axis=1) != 0 ra = np.rec.fromarrays(all_data[isvar].transpose().copy(), dtype=dtype) segs = self.segment_data[0].nseg[isvar] - isseg = np.array( - [True if s in segs else False for s in self.reach_data.iseg] - ) + isseg = np.array([True if s in segs else False for s in self.reach_data.iseg]) isinlet = isseg & (self.reach_data.ireach == 1) - rd = np.array(self.reach_data[isinlet])[ - ["k", "i", "j", "iseg", "ireach"] - ] + rd = np.array(self.reach_data[isinlet])[["k", "i", "j", "iseg", "ireach"]] ra = recfunctions.merge_arrays([rd, ra], flatten=True, usemask=False) return ra.view(np.recarray) def repair_outsegs(self): - isasegment = np.isin( - self.segment_data[0].outseg, self.segment_data[0].nseg - ) + isasegment = np.isin(self.segment_data[0].outseg, self.segment_data[0].nseg) isasegment = isasegment | (self.segment_data[0].outseg < 0) self.segment_data[0]["outseg"][~isasegment] = 0.0 self._graph = None @@ -1476,9 +1430,7 @@ def renumber_channel_data(d): d2 = None return d2 - self.channel_geometry_data = renumber_channel_data( - self.channel_geometry_data - ) + self.channel_geometry_data = renumber_channel_data(self.channel_geometry_data) self.channel_flow_data = renumber_channel_data(self.channel_flow_data) return r @@ -1579,9 +1531,7 @@ def _get_headwaters(self, per=0): One dimensional array listing all headwater segments. """ upsegs = [ - self.segment_data[per] - .nseg[self.segment_data[per].outseg == s] - .tolist() + self.segment_data[per].nseg[self.segment_data[per].outseg == s].tolist() for s in self.segment_data[0].nseg ] return self.segment_data[per].nseg[ @@ -1672,21 +1622,15 @@ def _write_1c(self, f_sfr): ) # see explanation for dataset 1c in online guide f_sfr.write(f"{self.isfropt:.0f} ") if self.isfropt > 1: - f_sfr.write( - f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} " - ) + f_sfr.write(f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} ") if self.nstrm < 0: f_sfr.write(f"{self.isfropt:.0f} ") if self.isfropt > 1: - f_sfr.write( - f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} " - ) + f_sfr.write(f"{self.nstrail:.0f} {self.isuzn:.0f} {self.nsfrsets:.0f} ") if self.nstrm < 0 or self.transroute: f_sfr.write(f"{self.irtflg:.0f} ") if self.irtflg > 0: - f_sfr.write( - f"{self.numtim:.0f} {self.weight:.8f} {self.flwtol:.8f} " - ) + f_sfr.write(f"{self.numtim:.0f} {self.weight:.8f} {self.flwtol:.8f} ") f_sfr.write("\n") def _write_reach_data(self, f_sfr): @@ -1754,18 +1698,14 @@ def _write_segment_data(self, i, j, f_sfr): bwdth, ) = (0 if v == self.default_value else v for v in seg_dat) - f_sfr.write( - " ".join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + " " - ) + f_sfr.write(" ".join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + " ") if iupseg > 0: f_sfr.write(fmts[4].format(iprior) + " ") if icalc == 4: f_sfr.write(fmts[5].format(nstrpts) + " ") - f_sfr.write( - " ".join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + " " - ) + f_sfr.write(" ".join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + " ") if icalc in [1, 2]: f_sfr.write(fmts[10].format(roughch) + " ") @@ -1773,9 +1713,7 @@ def _write_segment_data(self, i, j, f_sfr): f_sfr.write(fmts[11].format(roughbk) + " ") if icalc == 3: - f_sfr.write( - " ".join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + " " - ) + f_sfr.write(" ".join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + " ") f_sfr.write("\n") self._write_6bc( @@ -1822,31 +1760,22 @@ def _write_6bc(self, i, j, 
f_sfr, cols=()): if self.isfropt in [0, 4, 5] and icalc <= 0: f_sfr.write( - " ".join(fmts[0:5]).format( - hcond, thickm, elevupdn, width, depth - ) - + " " + " ".join(fmts[0:5]).format(hcond, thickm, elevupdn, width, depth) + " " ) elif self.isfropt in [0, 4, 5] and icalc == 1: f_sfr.write(fmts[0].format(hcond) + " ") if i == 0: - f_sfr.write( - " ".join(fmts[1:4]).format(thickm, elevupdn, width) + " " - ) + f_sfr.write(" ".join(fmts[1:4]).format(thickm, elevupdn, width) + " ") if self.isfropt in [4, 5]: - f_sfr.write( - " ".join(fmts[5:8]).format(thts, thti, eps) + " " - ) + f_sfr.write(" ".join(fmts[5:8]).format(thts, thti, eps) + " ") if self.isfropt == 5: f_sfr.write(fmts[8].format(uhc) + " ") elif i > 0 and self.isfropt == 0: - f_sfr.write( - " ".join(fmts[1:4]).format(thickm, elevupdn, width) + " " - ) + f_sfr.write(" ".join(fmts[1:4]).format(thickm, elevupdn, width) + " ") elif self.isfropt in [0, 4, 5] and icalc >= 2: f_sfr.write(fmts[0].format(hcond) + " ") @@ -1857,9 +1786,7 @@ def _write_6bc(self, i, j, f_sfr, cols=()): f_sfr.write(" ".join(fmts[1:3]).format(thickm, elevupdn) + " ") if self.isfropt in [4, 5] and icalc == 2 and i == 0: - f_sfr.write( - " ".join(fmts[3:6]).format(thts, thti, eps) + " " - ) + f_sfr.write(" ".join(fmts[3:6]).format(thts, thti, eps) + " ") if self.isfropt == 5: f_sfr.write(fmts[8].format(uhc) + " ") @@ -1904,10 +1831,7 @@ def write_file(self, filename=None): f_sfr.write(f"{self.heading}\n") # Item 1 - if ( - isinstance(self.options, OptionBlock) - and self.parent.version == "mfnwt" - ): + if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt": self.options.update_from_package(self) self.options.write_options(f_sfr) elif isinstance(self.options, OptionBlock): @@ -1945,9 +1869,7 @@ def write_file(self, filename=None): or self.isfropt <= 1 ): for k in range(2): - for d in self.channel_geometry_data[i][nseg][ - k - ]: + for d in self.channel_geometry_data[i][nseg][k]: f_sfr.write(f"{d:.2f} ") f_sfr.write("\n") @@ -2217,9 +2139,7 @@ def _boolean_compare( cols = [ c for c in failed_info.dtype.names - if failed_info[c].sum() != 0 - and c != "diff" - and "tmp" not in c + if failed_info[c].sum() != 0 and c != "diff" and "tmp" not in c ] failed_info = recfunctions.append_fields( failed_info[cols].copy(), @@ -2236,9 +2156,7 @@ def _boolean_compare( txt += "\n" return txt - def _txt_footer( - self, headertxt, txt, testname, passed=False, warning=True - ): + def _txt_footer(self, headertxt, txt, testname, passed=False, warning=True): if len(txt) == 0 or passed: txt += "passed." 
self.passed.append(testname) @@ -2268,9 +2186,7 @@ def for_nans(self): isnan = np.any(np.isnan(np.array(sd.tolist())), axis=1) nansd = sd[isnan] if np.any(isnan): - txt += ( - f"Per {per}: found {len(nanreaches)} segments with nans:\n" - ) + txt += f"Per {per}: found {len(nanreaches)} segments with nans:\n" if self.level == 1: txt += _print_rec_array(nansd, delimiter=" ") if len(txt) == 0: @@ -2285,9 +2201,7 @@ def numbering(self): Checks for continuity in segment and reach numbering """ - headertxt = ( - "Checking for continuity in segment and reach numbering...\n" - ) + headertxt = "Checking for continuity in segment and reach numbering...\n" if self.verbose: print(headertxt.strip()) txt = "" @@ -2317,7 +2231,9 @@ def numbering(self): warning=False, ) - headertxt = "Checking for increasing segment numbers in downstream direction...\n" + headertxt = ( + "Checking for increasing segment numbers in downstream direction...\n" + ) txt = "" passed = False if self.verbose: @@ -2361,12 +2277,8 @@ def routing(self): if self.level == 1: txt += " ".join(map(str, circular_segs)) + "\n" else: - f = os.path.join( - self.sfr.parent._model_ws, "circular_routing.chk.csv" - ) - np.savetxt( - f, circular_segs, fmt="%d", delimiter=",", header=txt - ) + f = os.path.join(self.sfr.parent._model_ws, "circular_routing.chk.csv") + np.savetxt(f, circular_segs, fmt="%d", delimiter=",", header=txt) txt += f"See {f} for details." if self.verbose: print(txt) @@ -2433,14 +2345,9 @@ def routing(self): txt += f"See {fpath} for details." if self.verbose: print(txt) - self._txt_footer( - headertxt, txt, "reach connections", warning=False - ) + self._txt_footer(headertxt, txt, "reach connections", warning=False) else: - txt += ( - "No DIS package or modelgrid object; cannot " - "check reach proximities." - ) + txt += "No DIS package or modelgrid object; cannot check reach proximities." 
self._txt_footer(headertxt, txt, "") def overlapping_conductance(self, tol=1e-6): @@ -2450,8 +2357,7 @@ def overlapping_conductance(self, tol=1e-6): """ headertxt = ( - "Checking for model cells with multiple non-zero " - "SFR conductances...\n" + "Checking for model cells with multiple non-zero SFR conductances...\n" ) txt = "" if self.verbose: @@ -2467,9 +2373,7 @@ def overlapping_conductance(self, tol=1e-6): for i, (r, c) in enumerate(reach_data[["i", "j"]]): if (r, c) not in uniquerc: uniquerc[(r, c)] = i + 1 - reach_data["node"] = [ - uniquerc[(r, c)] for r, c in reach_data[["i", "j"]] - ] + reach_data["node"] = [uniquerc[(r, c)] for r, c in reach_data[["i", "j"]]] K = reach_data["strhc1"] if K.max() == 0: @@ -2495,9 +2399,7 @@ def overlapping_conductance(self, tol=1e-6): conductances.sort() # list nodes with multiple non-zero SFR reach conductances - if conductances[-1] != 0.0 and ( - conductances[0] / conductances[-1] > tol - ): + if conductances[-1] != 0.0 and (conductances[0] / conductances[-1] > tol): nodes_with_multiple_conductance.update({node}) if len(nodes_with_multiple_conductance) > 0: @@ -2555,9 +2457,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): with model grid """ - headertxt = ( - f"Checking for streambed tops of less than {min_strtop}...\n" - ) + headertxt = f"Checking for streambed tops of less than {min_strtop}...\n" txt = "" if self.verbose: print(headertxt.strip()) @@ -2570,8 +2470,10 @@ def elevations(self, min_strtop=-10, max_strtop=15000): is_less = self.reach_data.strtop < min_strtop if np.any(is_less): below_minimum = self.reach_data[is_less] - txt += "{} instances of streambed top below minimum found.\n".format( - len(below_minimum) + txt += ( + "{} instances of streambed top below minimum found.\n".format( + len(below_minimum) + ) ) if self.level == 1: txt += "Reaches with low strtop:\n" @@ -2583,9 +2485,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): passed = True self._txt_footer(headertxt, txt, "minimum streambed top", passed) - headertxt = ( - f"Checking for streambed tops of greater than {max_strtop}...\n" - ) + headertxt = f"Checking for streambed tops of greater than {max_strtop}...\n" txt = "" if self.verbose: print(headertxt.strip()) @@ -2593,10 +2493,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): passed = False if self.sfr.isfropt in [1, 2, 3]: if np.diff(self.reach_data.strtop).max() == 0: - txt += ( - "isfropt setting of 1,2 or 3 " - "requires strtop information!\n" - ) + txt += "isfropt setting of 1,2 or 3 requires strtop information!\n" else: is_greater = self.reach_data.strtop > max_strtop if np.any(is_greater): @@ -2616,8 +2513,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): self._txt_footer(headertxt, txt, "maximum streambed top", passed) headertxt = ( - "Checking segment_data for " - "downstream rises in streambed elevation...\n" + "Checking segment_data for downstream rises in streambed elevation...\n" ) txt = "" if self.verbose: @@ -2642,10 +2538,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): datatype="Segment", ) if len(t) > 0: - txt += ( - "Elevation check requires " - "consecutive segment numbering." - ) + txt += "Elevation check requires consecutive segment numbering." 
self._txt_footer(headertxt, txt, "") return @@ -2667,15 +2560,9 @@ def elevations(self, min_strtop=-10, max_strtop=15000): # next check for rises between segments non_outlets = segment_data.outseg > 0 - non_outlets_seg_data = segment_data[ - non_outlets - ] # lake outsegs are < 0 + non_outlets_seg_data = segment_data[non_outlets] # lake outsegs are < 0 outseg_elevup = np.array( - [ - segment_data.elevup[o - 1] - for o in segment_data.outseg - if o > 0 - ] + [segment_data.elevup[o - 1] for o in segment_data.outseg if o > 0] ) d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets] non_outlets_seg_data = recfunctions.append_fields( @@ -2715,17 +2602,14 @@ def elevations(self, min_strtop=-10, max_strtop=15000): self._txt_footer(headertxt, txt, "segment elevations", passed) headertxt = ( - "Checking reach_data for " - "downstream rises in streambed elevation...\n" + "Checking reach_data for downstream rises in streambed elevation...\n" ) txt = "" if self.verbose: print(headertxt.strip()) passed = False if ( - self.sfr.nstrm < 0 - or self.sfr.reachinput - and self.sfr.isfropt in [1, 2, 3] + self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3] ): # see SFR input instructions # compute outreaches if they aren't there already if np.diff(self.sfr.reach_data.outreach).max() == 0: @@ -2794,15 +2678,15 @@ def elevations(self, min_strtop=-10, max_strtop=15000): print(headertxt.strip()) txt = "" if self.sfr.parent.dis is None: - txt += "No DIS file supplied; cannot check SFR elevations against model grid." + txt += ( + "No DIS file supplied; cannot check SFR elevations against model grid." + ) self._txt_footer(headertxt, txt, "") return passed = False warning = True if ( - self.sfr.nstrm < 0 - or self.sfr.reachinput - and self.sfr.isfropt in [1, 2, 3] + self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [1, 2, 3] ): # see SFR input instructions reach_data = np.array(self.reach_data) i, j, k = reach_data["i"], reach_data["j"], reach_data["k"] @@ -2839,9 +2723,7 @@ def elevations(self, min_strtop=-10, max_strtop=15000): level1txt="Layer bottom violations:", ) if len(txt) > 0: - warning = ( - False # this constitutes an error (MODFLOW won't run) - ) + warning = False # this constitutes an error (MODFLOW won't run) # check streambed elevations in relation to model top tops = self.sfr.parent.dis.top.array[i, j] reach_data = recfunctions.append_fields( @@ -2966,18 +2848,14 @@ def elevations(self, min_strtop=-10, max_strtop=15000): self.sfr.nstrm, self.sfr.isfropt ) passed = True - self._txt_footer( - headertxt, txt, "segment elevations vs. model grid", passed - ) + self._txt_footer(headertxt, txt, "segment elevations vs. model grid", passed) def slope(self, minimum_slope=1e-4, maximum_slope=1.0): """Checks that streambed slopes are greater than or equal to a specified minimum value. Low slope values can cause "backup" or unrealistic stream stages with icalc options where stage is computed. 
""" - headertxt = ( - f"Checking for streambed slopes of less than {minimum_slope}...\n" - ) + headertxt = f"Checking for streambed slopes of less than {minimum_slope}...\n" txt = "" if self.verbose: print(headertxt.strip()) @@ -2985,9 +2863,7 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0): passed = False if self.sfr.isfropt in [1, 2, 3]: if np.diff(self.reach_data.slope).max() == 0: - txt += ( - "isfropt setting of 1,2 or 3 requires slope information!\n" - ) + txt += "isfropt setting of 1,2 or 3 requires slope information!\n" else: is_less = self.reach_data.slope < minimum_slope if np.any(is_less): @@ -3005,7 +2881,9 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0): passed = True self._txt_footer(headertxt, txt, "minimum slope", passed) - headertxt = f"Checking for streambed slopes of greater than {maximum_slope}...\n" + headertxt = ( + f"Checking for streambed slopes of greater than {maximum_slope}...\n" + ) txt = "" if self.verbose: print(headertxt.strip()) @@ -3013,9 +2891,7 @@ def slope(self, minimum_slope=1e-4, maximum_slope=1.0): passed = False if self.sfr.isfropt in [1, 2, 3]: if np.diff(self.reach_data.slope).max() == 0: - txt += ( - "isfropt setting of 1,2 or 3 requires slope information!\n" - ) + txt += "isfropt setting of 1,2 or 3 requires slope information!\n" else: is_greater = self.reach_data.slope > maximum_slope @@ -3266,11 +3142,7 @@ def _parse_1c(line, reachinput, transroute): flwtol = float(line.pop(0)) # auxiliary variables (MODFLOW-LGR) - option = [ - line[i] - for i in np.arange(1, len(line)) - if "aux" in line[i - 1].lower() - ] + option = [line[i] for i in np.arange(1, len(line)) if "aux" in line[i - 1].lower()] return ( nstrm, diff --git a/flopy/modflow/mfsor.py b/flopy/modflow/mfsor.py index 720bfc36d5..af0c5b5cfd 100644 --- a/flopy/modflow/mfsor.py +++ b/flopy/modflow/mfsor.py @@ -165,10 +165,7 @@ def load(cls, f, model, ext_unit_dict=None): # dataset 0 -- header - print( - " Warning: load method not completed. " - "Default sor object created." - ) + print(" Warning: load method not completed. 
Default sor object created.") if openfile: f.close() diff --git a/flopy/modflow/mfstr.py b/flopy/modflow/mfstr.py index 00c9ad1c3f..01ffa75783 100644 --- a/flopy/modflow/mfstr.py +++ b/flopy/modflow/mfstr.py @@ -254,9 +254,7 @@ def __init__( self.set_cbc_output_file(ipakcb, model, filenames[1]) if istcb2 is not None: - model.add_output_file( - istcb2, fname=filenames[2], package=self._ftype() - ) + model.add_output_file(istcb2, fname=filenames[2], package=self._ftype()) else: ipakcb = 0 @@ -375,10 +373,7 @@ def __init__( elif isinstance(d, int): if model.verbose: if d < 0: - print( - " reusing str data from previous " - "stress period" - ) + print(" reusing str data from previous stress period") elif d == 0: print(f" no str data for stress period {key}") else: @@ -411,9 +406,7 @@ def __init__( "from previous stress period" ) elif d == 0: - print( - f" no str segment data for stress period {key}" - ) + print(f" no str segment data for stress period {key}") else: raise Exception( "ModflowStr error: unsupported data type: " @@ -612,16 +605,12 @@ def write_file(self): ds9 = [] for idx in range(self.ntrib): ds9.append(line[idx]) - f_str.write( - write_fixed_var(ds9, length=fmt9, free=free) - ) + f_str.write(write_fixed_var(ds9, length=fmt9, free=free)) # dataset 10 if self.ndiv > 0: for line in sdata: - f_str.write( - write_fixed_var([line[-1]], length=10, free=free) - ) + f_str.write(write_fixed_var([line[-1]], length=10, free=free)) # close the str file f_str.close() @@ -758,9 +747,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): dt = ModflowStr.get_empty( 1, aux_names=aux_names, structured=model.structured ).dtype - pak_parms = mfparbc.load( - f, npstr, dt, model, ext_unit_dict, model.verbose - ) + pak_parms = mfparbc.load(f, npstr, dt, model, ext_unit_dict, model.verbose) if nper is None: nper = model.nper @@ -834,17 +821,13 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): parval = float(par_dict["parval"]) else: try: - parval = float( - model.mfpar.pval.pval_dict[pname] - ) + parval = float(model.mfpar.pval.pval_dict[pname]) except: parval = float(par_dict["parval"]) # fill current parameter data (par_current) for ibnd, t in enumerate(data_dict): - current[ibnd] = tuple( - t[: len(current.dtype.names)] - ) + current[ibnd] = tuple(t[: len(current.dtype.names)]) else: if model.verbose: @@ -942,9 +925,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): ext_unit_dict, filetype=ModflowStr._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) if abs(istcb2) > 0: iu, filenames[2] = model.get_ext_dict_attr( ext_unit_dict, unit=abs(istcb2) diff --git a/flopy/modflow/mfsub.py b/flopy/modflow/mfsub.py index 3a76639cf5..b7b68dec07 100644 --- a/flopy/modflow/mfsub.py +++ b/flopy/modflow/mfsub.py @@ -473,9 +473,7 @@ def write_file(self, check=False, f=None): f"{self.ipakcb} {self.isuboc} {self.nndb} {self.ndb} {self.nmz} {self.nn} " ) - f.write( - f"{self.ac1} {self.ac2} {self.itmin} {self.idsave} {self.idrest}" - ) + f.write(f"{self.ac1} {self.ac2} {self.itmin} {self.idsave} {self.idrest}") line = "" if self.idbit is not None: line += f" {self.idbit}" @@ -815,14 +813,10 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowSub._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) if idsave > 0: - iu, 
filenames[2] = model.get_ext_dict_attr( - ext_unit_dict, unit=idsave - ) + iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=idsave) if isuboc > 0: ipos = 3 diff --git a/flopy/modflow/mfswi2.py b/flopy/modflow/mfswi2.py index c5e6fb9b02..86c4ce19f7 100644 --- a/flopy/modflow/mfswi2.py +++ b/flopy/modflow/mfswi2.py @@ -352,13 +352,9 @@ def __init__( # Create arrays so that they have the correct size if self.istrat == 1: - self.nu = Util2d( - model, (self.nsrf + 1,), np.float32, nu, name="nu" - ) + self.nu = Util2d(model, (self.nsrf + 1,), np.float32, nu, name="nu") else: - self.nu = Util2d( - model, (self.nsrf + 2,), np.float32, nu, name="nu" - ) + self.nu = Util2d(model, (self.nsrf + 2,), np.float32, nu, name="nu") self.zeta = [] for i in range(self.nsrf): self.zeta.append( @@ -370,9 +366,7 @@ def __init__( name=f"zeta_{i + 1}", ) ) - self.ssz = Util3d( - model, (nlay, nrow, ncol), np.float32, ssz, name="ssz" - ) + self.ssz = Util3d(model, (nlay, nrow, ncol), np.float32, ssz, name="ssz") self.isource = Util3d( model, (nlay, nrow, ncol), np.int32, isource, name="isource" ) @@ -451,9 +445,7 @@ def write_file(self, check=True, f=None): # write dataset 3b if self.adaptive is True: f.write("# Dataset 3b\n") - f.write( - f"{self.nadptmx:10d}{self.nadptmn:10d}{self.adptfct:14.6g}\n" - ) + f.write(f"{self.nadptmx:10d}{self.nadptmn:10d}{self.adptfct:14.6g}\n") # write dataset 4 f.write("# Dataset 4\n") f.write(self.nu.get_file_entry()) @@ -723,13 +715,9 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowSwi2._ftype() ) if iswizt > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=iswizt - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=iswizt) if ipakcb > 0: - iu, filenames[2] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[2] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) if abs(iswiobs) > 0: iu, filenames[3] = model.get_ext_dict_attr( ext_unit_dict, unit=abs(iswiobs) diff --git a/flopy/modflow/mfswr1.py b/flopy/modflow/mfswr1.py index ae628a9c52..e6100e3006 100644 --- a/flopy/modflow/mfswr1.py +++ b/flopy/modflow/mfswr1.py @@ -55,9 +55,7 @@ class ModflowSwr1(Package): """ - def __init__( - self, model, extension="swr", unitnumber=None, filenames=None - ): + def __init__(self, model, extension="swr", unitnumber=None, filenames=None): # set default unit number of one is not specified if unitnumber is None: unitnumber = ModflowSwr1._defaultunit() @@ -141,9 +139,7 @@ def load(cls, f, model, ext_unit_dict=None): filename = f f = open(filename, "r") - print( - "Warning: load method not completed. default swr1 object created." - ) + print("Warning: load method not completed. 
default swr1 object created.") if openfile: f.close() diff --git a/flopy/modflow/mfswt.py b/flopy/modflow/mfswt.py index 3a43cbfe9c..19ebd6107e 100644 --- a/flopy/modflow/mfswt.py +++ b/flopy/modflow/mfswt.py @@ -658,23 +658,17 @@ def load(cls, f, model, ext_unit_dict=None): # read dataset 4 if model.verbose: print(" loading swt dataset 4") - gl0 = Util2d.load( - f, model, (nrow, ncol), np.float32, "gl0", ext_unit_dict - ) + gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, "gl0", ext_unit_dict) # read dataset 5 if model.verbose: print(" loading swt dataset 5") - sgm = Util2d.load( - f, model, (nrow, ncol), np.float32, "sgm", ext_unit_dict - ) + sgm = Util2d.load(f, model, (nrow, ncol), np.float32, "sgm", ext_unit_dict) # read dataset 6 if model.verbose: print(" loading swt dataset 6") - sgs = Util2d.load( - f, model, (nrow, ncol), np.float32, "sgs", ext_unit_dict - ) + sgs = Util2d.load(f, model, (nrow, ncol), np.float32, "sgs", ext_unit_dict) # read datasets 7 to 13 thick = [0] * nsystm @@ -842,9 +836,7 @@ def load(cls, f, model, ext_unit_dict=None): ext_unit_dict, filetype=ModflowSwt._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) if iswtoc > 0: ipos = 2 diff --git a/flopy/modflow/mfupw.py b/flopy/modflow/mfupw.py index 3d6b311709..85a40821de 100644 --- a/flopy/modflow/mfupw.py +++ b/flopy/modflow/mfupw.py @@ -161,8 +161,7 @@ def __init__( ): if model.version != "mfnwt": raise Exception( - "Error: model version must be mfnwt to use " - f"{self._ftype()} package" + f"Error: model version must be mfnwt to use {self._ftype()} package" ) # set default unit number of one is not specified @@ -452,9 +451,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if model.verbose: print(f" loading hk layer {k + 1:3d}...") if "hk" not in par_types: - t = Util2d.load( - f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.float32, "hk", ext_unit_dict) else: line = f.readline() t = mfpar.parameter_fill( @@ -489,9 +486,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if layvka[k] != 0: key = "vani" if "vk" not in par_types and "vani" not in par_types: - t = Util2d.load( - f, model, (nrow, ncol), np.float32, key, ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.float32, key, ext_unit_dict) else: line = f.readline() t = mfpar.parameter_fill( @@ -566,9 +561,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True): ext_unit_dict, filetype=ModflowUpw._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) # create upw object diff --git a/flopy/modflow/mfuzf1.py b/flopy/modflow/mfuzf1.py index 5acc699eb3..9d01dabfd3 100644 --- a/flopy/modflow/mfuzf1.py +++ b/flopy/modflow/mfuzf1.py @@ -521,9 +521,7 @@ def __init__( # Data Set 2 # IUZFBND (NCOL, NROW) -- U2DINT - self.iuzfbnd = Util2d( - model, (nrow, ncol), np.int32, iuzfbnd, name="iuzfbnd" - ) + self.iuzfbnd = Util2d(model, (nrow, ncol), np.int32, iuzfbnd, name="iuzfbnd") # If IRUNFLG > 0: Read item 3 # Data Set 3 @@ -540,9 +538,7 @@ def __init__( self.vks = Util2d(model, (nrow, ncol), np.float32, vks, name="vks") if seepsurfk or specifysurfk: - self.surfk = Util2d( - model, (nrow, ncol), np.float32, surfk, name="surfk" - ) + self.surfk = Util2d(model, (nrow, ncol), np.float32, surfk, 
name="surfk") if iuzfopt > 0: # Data Set 5 @@ -550,20 +546,14 @@ def __init__( self.eps = Util2d(model, (nrow, ncol), np.float32, eps, name="eps") # Data Set 6a # THTS (NCOL, NROW) -- U2DREL - self.thts = Util2d( - model, (nrow, ncol), np.float32, thts, name="thts" - ) + self.thts = Util2d(model, (nrow, ncol), np.float32, thts, name="thts") # Data Set 6b # THTS (NCOL, NROW) -- U2DREL if self.specifythtr > 0: - self.thtr = Util2d( - model, (nrow, ncol), np.float32, thtr, name="thtr" - ) + self.thtr = Util2d(model, (nrow, ncol), np.float32, thtr, name="thtr") # Data Set 7 # [THTI (NCOL, NROW)] -- U2DREL - self.thti = Util2d( - model, (nrow, ncol), np.float32, thti, name="thti" - ) + self.thti = Util2d(model, (nrow, ncol), np.float32, thti, name="thti") # Data Set 8 # {IFTUNIT: [IUZROW, IUZCOL, IUZOPT]} @@ -574,13 +564,9 @@ def __init__( # Data Set 10 # [FINF (NCOL, NROW)] – U2DREL - self.finf = Transient2d( - model, (nrow, ncol), np.float32, finf, name="finf" - ) + self.finf = Transient2d(model, (nrow, ncol), np.float32, finf, name="finf") if ietflg > 0: - self.pet = Transient2d( - model, (nrow, ncol), np.float32, pet, name="pet" - ) + self.pet = Transient2d(model, (nrow, ncol), np.float32, pet, name="pet") self.extdp = Transient2d( model, (nrow, ncol), np.float32, extdp, name="extdp" ) @@ -696,10 +682,7 @@ def write_file(self, f=None): f_uzf.write(f"{self.heading}\n") # Dataset 1a - if ( - isinstance(self.options, OptionBlock) - and self.parent.version == "mfnwt" - ): + if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt": self.options.update_from_package(self) self.options.write_options(f_uzf) @@ -708,7 +691,9 @@ def write_file(self, f=None): # Dataset 1b if self.iuzfopt > 0: - comment = " #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES" + comment = ( + " #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES" + ) f_uzf.write( "{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:15.6E}{:100s}\n".format( self.nuztop, @@ -765,10 +750,7 @@ def write_file(self, f=None): f_uzf.write(self.thtr.get_file_entry()) # Data Set 7 # [THTI (NCOL, NROW)] -- U2DREL - if ( - not self.parent.get_package("DIS").steady[0] - or self.specifythti > 0.0 - ): + if not self.parent.get_package("DIS").steady[0] or self.specifythti > 0.0: f_uzf.write(self.thti.get_file_entry()) # If NUZGAG>0: Item 8 is repeated NUZGAG times # Data Set 8 @@ -803,11 +785,7 @@ def write_transient(name): write_transient("extdp") if self.iuzfopt > 0: write_transient("extwc") - if ( - self.capillaryuzet - and "nwt" in self.parent.version - and self.iuzfopt > 0 - ): + if self.capillaryuzet and "nwt" in self.parent.version and self.iuzfopt > 0: write_transient("air_entry") write_transient("hroot") write_transient("rootact") diff --git a/flopy/modflow/mfwel.py b/flopy/modflow/mfwel.py index e211c949a8..742659427b 100644 --- a/flopy/modflow/mfwel.py +++ b/flopy/modflow/mfwel.py @@ -205,9 +205,7 @@ def __init__( if dtype is not None: self.dtype = dtype else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured - ) + self.dtype = self.get_default_dtype(structured=self.parent.structured) # determine if any aux variables in dtype dt = self.get_default_dtype(structured=self.parent.structured) @@ -228,9 +226,7 @@ def __init__( self.options = options # initialize MfList - self.stress_period_data = MfList( - self, stress_period_data, binary=binary - ) + self.stress_period_data = MfList(self, stress_period_data, binary=binary) if add_package: self.parent.add_package(self) 
@@ -271,10 +267,7 @@ def write_file(self, f=None): f_wel.write(f"{self.heading}\n") - if ( - isinstance(self.options, OptionBlock) - and self.parent.version == "mfnwt" - ): + if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt": self.options.update_from_package(self) if self.options.block: self.options.write_options(f_wel) @@ -285,9 +278,7 @@ def write_file(self, f=None): if self.options.noprint: line += "NOPRINT " if self.options.auxiliary: - line += " ".join( - [str(aux).upper() for aux in self.options.auxiliary] - ) + line += " ".join([str(aux).upper() for aux in self.options.auxiliary]) else: for opt in self.options: @@ -296,10 +287,7 @@ def write_file(self, f=None): line += "\n" f_wel.write(line) - if ( - isinstance(self.options, OptionBlock) - and self.parent.version == "mfnwt" - ): + if isinstance(self.options, OptionBlock) and self.parent.version == "mfnwt": if not self.options.block: if isinstance(self.options.specify, np.ndarray): self.options.tabfiles = False @@ -307,9 +295,7 @@ def write_file(self, f=None): else: if self.specify and self.parent.version == "mfnwt": - f_wel.write( - f"SPECIFY {self.phiramp:10.5g} {self.iunitramp:10d}\n" - ) + f_wel.write(f"SPECIFY {self.phiramp:10.5g} {self.iunitramp:10d}\n") self.stress_period_data.write_transient(f_wel) f_wel.close() diff --git a/flopy/modflow/mfzon.py b/flopy/modflow/mfzon.py index ca63a8efdd..7d63e2184f 100644 --- a/flopy/modflow/mfzon.py +++ b/flopy/modflow/mfzon.py @@ -177,9 +177,7 @@ def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None): if model.verbose: print(f' reading data for "{zonnam:<10s}" zone') # load data - t = Util2d.load( - f, model, (nrow, ncol), np.int32, zonnam, ext_unit_dict - ) + t = Util2d.load(f, model, (nrow, ncol), np.int32, zonnam, ext_unit_dict) # add unit number to list of external files in ext_unit_dict # to remove. 
if t.locat is not None: diff --git a/flopy/modflowlgr/mflgr.py b/flopy/modflowlgr/mflgr.py index df7c3c0ef9..790ed197b0 100644 --- a/flopy/modflowlgr/mflgr.py +++ b/flopy/modflowlgr/mflgr.py @@ -249,10 +249,7 @@ def _get_path(self, bpth, pth, fpth=""): rpth = fpth else: rpth = os.path.join(rpth, fpth) - msg = ( - "namefiles must be in the same directory as " - "the lgr control file\n" - ) + msg = "namefiles must be in the same directory as the lgr control file\n" msg += f"Control file path: {lpth}\n" msg += f"Namefile path: {mpth}\n" msg += f"Relative path: {rpth}\n" @@ -321,9 +318,7 @@ def write_name_file(self): zip(self.children_models, self.children_data) ): # dataset 6 - pth = self._get_path( - self._model_ws, child._model_ws, fpth=child.namefile - ) + pth = self._get_path(self._model_ws, child._model_ws, fpth=child.namefile) comment = f"data set 6 - child {idx + 1} namefile" line = self._padline(pth, comment=comment) f.write(line) @@ -340,9 +335,7 @@ def write_name_file(self): child_data.iucbhsv, child_data.iucbfsv, ) - comment = ( - f"data set 8 - child {idx + 1} ishflg, ibflg, iucbhsv, iucbfsv" - ) + comment = f"data set 8 - child {idx + 1} ishflg, ibflg, iucbhsv, iucbfsv" line = self._padline(line, comment=comment) f.write(line) @@ -429,8 +422,9 @@ def change_model_ws(self, new_pth=None, reset_external=False): not_valid = new_pth new_pth = os.getcwd() print( - "\n{} not valid, workspace-folder was changed to {}" - "\n".format(not_valid, new_pth) + "\n{} not valid, workspace-folder was changed to {}\n".format( + not_valid, new_pth + ) ) # --reset the model workspace old_pth = self._model_ws @@ -446,9 +440,7 @@ def change_model_ws(self, new_pth=None, reset_external=False): npth = new_pth else: npth = os.path.join(new_pth, rpth) - self.parent.change_model_ws( - new_pth=npth, reset_external=reset_external - ) + self.parent.change_model_ws(new_pth=npth, reset_external=reset_external) # reset model_ws for the children for child in self.children_models: lpth = os.path.abspath(old_pth) diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py index 7f8b04d663..25f23d19a1 100644 --- a/flopy/modpath/mp6.py +++ b/flopy/modpath/mp6.py @@ -102,9 +102,7 @@ def __init__( # ensure that user-specified files are used iu = self.__mf.oc.iuhead head_file = ( - self.__mf.get_output(unit=iu) - if head_file is None - else head_file + self.__mf.get_output(unit=iu) if head_file is None else head_file ) p = self.__mf.get_package("LPF") if p is None: @@ -118,13 +116,9 @@ def __init__( ) iu = p.ipakcb budget_file = ( - self.__mf.get_output(unit=iu) - if budget_file is None - else budget_file - ) - dis_file = ( - self.__mf.dis.file_name[0] if dis_file is None else dis_file + self.__mf.get_output(unit=iu) if budget_file is None else budget_file ) + dis_file = self.__mf.dis.file_name[0] if dis_file is None else dis_file dis_unit = self.__mf.dis.unit_number[0] nper = self.__mf.dis.nper @@ -187,9 +181,7 @@ def __init__( self.load = load self.__next_ext_unit = 500 if external_path is not None: - assert os.path.exists( - external_path - ), "external_path does not exist" + assert os.path.exists(external_path), "external_path does not exist" self.external = True def __repr__(self): @@ -342,9 +334,7 @@ def create_mpsim( if package.upper() == "WEL": ParticleGenerationOption = 1 if "WEL" not in pak_list: - raise Exception( - "Error: no well package in the passed model" - ) + raise Exception("Error: no well package in the passed model") for kper in range(nper): mflist = self.__mf.wel.stress_period_data[kper] idx = 
(mflist["k"], mflist["i"], mflist["j"]) @@ -369,9 +359,7 @@ def create_mpsim( ) group_region.append([k, i, j, k, i, j]) if default_ifaces is None: - ifaces.append( - side_faces + [top_face, botm_face] - ) + ifaces.append(side_faces + [top_face, botm_face]) face_ct.append(6) else: ifaces.append(default_ifaces) @@ -381,9 +369,7 @@ def create_mpsim( elif "MNW" in package.upper(): ParticleGenerationOption = 1 if "MNW2" not in pak_list: - raise Exception( - "Error: no MNW2 package in the passed model" - ) + raise Exception("Error: no MNW2 package in the passed model") node_data = self.__mf.mnw2.get_allnode_data() node_data.sort(order=["wellid", "k"]) wellids = np.unique(node_data.wellid) @@ -422,9 +408,7 @@ def append_node(ifaces_well, wellid, node_number, k, i, j): j, ) else: - append_node( - side_faces + [top_face], wellid, 0, k, i, j - ) + append_node(side_faces + [top_face], wellid, 0, k, i, j) for n in range(len(nd))[1:]: k, i, j = nd.k[n], nd.i[n], nd.j[n] if n == len(nd) - 1: @@ -464,9 +448,7 @@ def append_node(ifaces_well, wellid, node_number, k, i, j): if self.__mf is not None: model_ws = self.__mf.model_ws if os.path.exists(os.path.join(model_ws, package)): - print( - "detected a particle starting locations file in packages" - ) + print("detected a particle starting locations file in packages") assert len(packages) == 1, ( "if a particle starting locations file is passed, " "other packages cannot be specified" diff --git a/flopy/modpath/mp6bas.py b/flopy/modpath/mp6bas.py index 141e8397e1..50ccae6d15 100644 --- a/flopy/modpath/mp6bas.py +++ b/flopy/modpath/mp6bas.py @@ -181,9 +181,7 @@ def _create_ltype(self, laytyp): else: # no user passed layertype have_layertype = False if self.parent.getmf() is None: - raise ValueError( - "if modflowmodel is None then laytype must be passed" - ) + raise ValueError("if modflowmodel is None then laytype must be passed") # run though flow packages flow_package = self.parent.getmf().get_package("BCF6") diff --git a/flopy/modpath/mp6sim.py b/flopy/modpath/mp6sim.py index 1cf465958a..fc4ebcc670 100644 --- a/flopy/modpath/mp6sim.py +++ b/flopy/modpath/mp6sim.py @@ -277,9 +277,7 @@ def write_file(self): ReleasePeriodLength, ReleaseEventCount, ) = self.release_times[i] - f_sim.write( - f"{ReleasePeriodLength:f} {ReleaseEventCount}\n" - ) + f_sim.write(f"{ReleasePeriodLength:f} {ReleaseEventCount}\n") # item 15 if GridCellRegionOption == 1: ( @@ -365,9 +363,7 @@ def write_file(self): # item 27 for k in range(self.cell_bd_ct): Grid, Layer, Row, Column = self.bud_loc[k] - f_sim.write( - f"{Grid} {Layer + 1} {Row + 1} {Column + 1} \n" - ) + f_sim.write(f"{Grid} {Layer + 1} {Row + 1} {Column + 1} \n") if self.options_dict["BudgetOutputOption"] == 4: # item 28 f_sim.write(f"{self.trace_file}\n") @@ -418,9 +414,7 @@ def __init__( self.model = model self.use_pandas = use_pandas - self.heading = ( - "# Starting locations file for Modpath, generated by Flopy." - ) + self.heading = "# Starting locations file for Modpath, generated by Flopy." 
self.input_style = inputstyle if inputstyle != 1: raise NotImplementedError @@ -502,9 +496,7 @@ def _write_particle_data_with_pandas(self, data, float_format): :return: """ # convert float format string to pandas float format - float_format = ( - float_format.replace("{", "").replace("}", "").replace(":", "%") - ) + float_format = float_format.replace("{", "").replace("}", "").replace(":", "%") data = pd.DataFrame(data) if len(data) == 0: return @@ -518,9 +510,7 @@ def _write_particle_data_with_pandas(self, data, float_format): # simple speed test writing particles with flopy and running model took 30 min, writing with pandas took __min loc_path = self.fn_path # write groups - group_dict = dict( - data[["particlegroup", "groupname"]].itertuples(False, None) - ) + group_dict = dict(data[["particlegroup", "groupname"]].itertuples(False, None)) # writing group loc data groups = ( @@ -530,13 +520,9 @@ def _write_particle_data_with_pandas(self, data, float_format): .reset_index() .rename(columns={"groupname": "count"}) ) - groups.loc[:, "groupname"] = groups.loc[:, "particlegroup"].replace( - group_dict - ) + groups.loc[:, "groupname"] = groups.loc[:, "particlegroup"].replace(group_dict) group_count = len(groups.index) - groups = pd.Series( - groups[["groupname", "count"]].astype(str).values.flatten() - ) + groups = pd.Series(groups[["groupname", "count"]].astype(str).values.flatten()) with open(loc_path, "w") as f: f.write(f"{self.heading}\n") f.write(f"{self.input_style:d}\n") diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py index d2ad195940..4e8d06c44d 100644 --- a/flopy/modpath/mp7.py +++ b/flopy/modpath/mp7.py @@ -115,10 +115,7 @@ def __init__( # if a MFModel instance ensure flowmodel is a MODFLOW 6 GWF model if isinstance(flowmodel, MFModel): - if ( - flowmodel.model_type != "gwf" - and flowmodel.model_type != "gwf6" - ): + if flowmodel.model_type != "gwf" and flowmodel.model_type != "gwf6": raise TypeError( "Modpath7: flow model type must be gwf. " f"Passed model_type is {flowmodel.model_type}." 
@@ -127,9 +124,7 @@ def __init__( # set flowmodel and flow_version attributes self.flowmodel = flowmodel self.flow_version = self.flowmodel.version - self._flowmodel_ws = os.path.relpath( - flowmodel.model_ws, self._model_ws - ) + self._flowmodel_ws = os.path.relpath(flowmodel.model_ws, self._model_ws) if self.flow_version == "mf6": # get discretization package @@ -184,8 +179,7 @@ def __init__( tdis = self.flowmodel.simulation.get_package("TDIS") if tdis is None: raise Exception( - "TDIS package must be " - "included in the passed MODFLOW 6 model" + "TDIS package must be included in the passed MODFLOW 6 model" ) tdis_file = tdis.filename @@ -209,9 +203,7 @@ def __init__( # set budget file name if budgetfilename is None: - budgetfilename = oc.budget_filerecord.array["budgetfile"][ - 0 - ] + budgetfilename = oc.budget_filerecord.array["budgetfile"][0] else: shape = None # extract data from DIS or DISU files and set shape @@ -339,10 +331,7 @@ def __repr__(self): def laytyp(self): if self.flowmodel.version == "mf6": icelltype = self.flowmodel.npf.icelltype.array - laytyp = [ - icelltype[k].max() - for k in range(self.flowmodel.modelgrid.nlay) - ] + laytyp = [icelltype[k].max() for k in range(self.flowmodel.modelgrid.nlay)] else: p = self.flowmodel.get_package("BCF6") if p is None: @@ -386,9 +375,7 @@ def write_name_file(self): f"{self.grbtag:10s} {os.path.join(self._flowmodel_ws, self.grbdis_file)}\n" ) if self.tdis_file is not None: - f.write( - f"TDIS {os.path.join(self._flowmodel_ws, self.tdis_file)}\n" - ) + f.write(f"TDIS {os.path.join(self._flowmodel_ws, self.tdis_file)}\n") if self.headfilename is not None: f.write( f"HEAD {os.path.join(self._flowmodel_ws, self.headfilename)}\n" diff --git a/flopy/modpath/mp7bas.py b/flopy/modpath/mp7bas.py index 155bb977c8..583462af65 100644 --- a/flopy/modpath/mp7bas.py +++ b/flopy/modpath/mp7bas.py @@ -38,9 +38,7 @@ class Modpath7Bas(Package): """ - def __init__( - self, model, porosity=0.30, defaultiface=None, extension="mpbas" - ): + def __init__(self, model, porosity=0.30, defaultiface=None, extension="mpbas"): unitnumber = model.next_unit() super().__init__(model, extension, "MPBAS", unitnumber) diff --git a/flopy/modpath/mp7particledata.py b/flopy/modpath/mp7particledata.py index 26fad032d1..84b78c0e1f 100644 --- a/flopy/modpath/mp7particledata.py +++ b/flopy/modpath/mp7particledata.py @@ -153,8 +153,7 @@ def __init__( ) else: allint = all( - isinstance(el, (int, np.int32, np.int64)) - for el in partlocs + isinstance(el, (int, np.int32, np.int64)) for el in partlocs ) # convert to a list of tuples if allint: @@ -162,9 +161,7 @@ def __init__( for el in partlocs: t.append((el,)) partlocs = t - alllsttup = all( - isinstance(el, (list, tuple)) for el in partlocs - ) + alllsttup = all(isinstance(el, (list, tuple)) for el in partlocs) if alllsttup: alllen1 = all(len(el) == 1 for el in partlocs) if not alllen1: @@ -183,9 +180,7 @@ def __init__( partlocs = np.array(partlocs) if len(partlocs.shape) == 1: partlocs = partlocs.reshape(len(partlocs), 1) - partlocs = unstructured_to_structured( - np.array(partlocs), dtype=dtype - ) + partlocs = unstructured_to_structured(np.array(partlocs), dtype=dtype) elif isinstance(partlocs, np.ndarray): # reshape and convert dtype if needed if len(partlocs.shape) == 1: @@ -253,9 +248,7 @@ def __init__( timeoffset = 0.0 else: if isinstance(timeoffset, (float, int)): - timeoffset = ( - np.ones(partlocs.shape[0], dtype=np.float32) * timeoffset - ) + timeoffset = np.ones(partlocs.shape[0], dtype=np.float32) * timeoffset elif 
isinstance(timeoffset, (list, tuple)): timeoffset = np.array(timeoffset, dtype=np.float32) if isinstance(timeoffset, np.ndarray): @@ -313,9 +306,7 @@ def __init__( # create empty particle ncells = partlocs.shape[0] self.dtype = self._get_dtype(structured, particleid) - particledata = create_empty_recarray( - ncells, self.dtype, default_value=0 - ) + particledata = create_empty_recarray(ncells, self.dtype, default_value=0) # fill particle if structured: @@ -416,9 +407,7 @@ def convert(row) -> tuple[float, float, float]: return [ cvt_xy(row.localx, xs), cvt_xy(row.localy, ys), - row.localz - if localz - else cvt_z(row.localz, row.k, row.i, row.j), + row.localz if localz else cvt_z(row.localz, row.k, row.i, row.j), ] else: @@ -848,9 +837,7 @@ def get_extent(grid, k=None, i=None, j=None, nn=None, localz=False) -> Extent: return Extent(minx, maxx, miny, maxy, minz, maxz, xspan, yspan, zspan) -def get_face_release_points( - subdivisiondata, cellid, extent -) -> Iterator[tuple]: +def get_face_release_points(subdivisiondata, cellid, extent) -> Iterator[tuple]: """ Get release points for MODPATH 7 input style 2, template subdivision style 1, i.e. face (2D) subdivision, for the @@ -934,10 +921,7 @@ def get_face_release_points( yield cellid + [p[0], extent.maxy, p[1]] # z1 (bottom) - if ( - subdivisiondata.rowdivisions5 > 0 - and subdivisiondata.columndivisions5 > 0 - ): + if subdivisiondata.rowdivisions5 > 0 and subdivisiondata.columndivisions5 > 0: xincr = extent.xspan / subdivisiondata.columndivisions5 xlocs = [ (extent.minx + (xincr * 0.5) + (xincr * rd)) @@ -952,10 +936,7 @@ def get_face_release_points( yield cellid + [p[0], p[1], extent.minz] # z2 (top) - if ( - subdivisiondata.rowdivisions6 > 0 - and subdivisiondata.columndivisions6 > 0 - ): + if subdivisiondata.rowdivisions6 > 0 and subdivisiondata.columndivisions6 > 0: xincr = extent.xspan / subdivisiondata.columndivisions6 xlocs = [ (extent.minx + (xincr * 0.5) + (xincr * rd)) @@ -970,9 +951,7 @@ def get_face_release_points( yield cellid + [p[0], p[1], extent.maxz] -def get_cell_release_points( - subdivisiondata, cellid, extent -) -> Iterator[tuple]: +def get_cell_release_points(subdivisiondata, cellid, extent) -> Iterator[tuple]: """ Get release points for MODPATH 7 input style 2, template subdivision type 2, i.e. 
cell (3D) subdivision, for the @@ -1022,9 +1001,7 @@ def get_release_points( elif isinstance(subdivisiondata, CellDataType): return get_cell_release_points(subdivisiondata, cellid, extent) else: - raise ValueError( - f"Unsupported subdivision data type: {type(subdivisiondata)}" - ) + raise ValueError(f"Unsupported subdivision data type: {type(subdivisiondata)}") class LRCParticleData: @@ -1150,9 +1127,7 @@ def write(self, f=None): for sd, region in zip(self.subdivisiondata, self.lrcregions): # item 3 - f.write( - f"{sd.templatesubdivisiontype} {region.shape[0]} {sd.drape}\n" - ) + f.write(f"{sd.templatesubdivisiontype} {region.shape[0]} {sd.drape}\n") # item 4 or 5 sd.write(f) @@ -1215,9 +1190,7 @@ def to_prp(self, grid, localz=False) -> Iterator[tuple]: """ if grid.grid_type != "structured": - raise ValueError( - "Particle representation is structured but grid is not" - ) + raise ValueError("Particle representation is structured but grid is not") irpt_offset = 0 for region in self.lrcregions: @@ -1228,9 +1201,7 @@ def to_prp(self, grid, localz=False) -> Iterator[tuple]: for j in range(minj, maxj + 1): for sd in self.subdivisiondata: for irpt, rpt in enumerate( - get_release_points( - sd, grid, k, i, j, localz=localz - ) + get_release_points(sd, grid, k, i, j, localz=localz) ): assert rpt[0] == k assert rpt[1] == i @@ -1310,8 +1281,7 @@ def __init__(self, subdivisiondata=None, nodes=None): nodes = nodes.reshape(1, nodes.shape[0]) # convert to a list of numpy arrays nodes = [ - np.array(nodes[i, :], dtype=np.int32) - for i in range(nodes.shape[0]) + np.array(nodes[i, :], dtype=np.int32) for i in range(nodes.shape[0]) ] elif isinstance(nodes, (list, tuple)): # convert a single list/tuple to a list of tuples if only one @@ -1320,9 +1290,7 @@ def __init__(self, subdivisiondata=None, nodes=None): if len(nodes) > 1: nodes = [tuple(nodes)] # determine if the list or tuple contains lists or tuples - alllsttup = all( - isinstance(el, (list, tuple, np.ndarray)) for el in nodes - ) + alllsttup = all(isinstance(el, (list, tuple, np.ndarray)) for el in nodes) if not alllsttup: raise TypeError( "{}: nodes should be " @@ -1380,9 +1348,7 @@ def write(self, f=None): for sd, nodes in zip(self.subdivisiondata, self.nodedata): # item 3 - f.write( - f"{sd.templatesubdivisiontype} {nodes.shape[0]} {sd.drape}\n" - ) + f.write(f"{sd.templatesubdivisiontype} {nodes.shape[0]} {sd.drape}\n") # item 4 or 5 sd.write(f) @@ -1417,9 +1383,7 @@ def to_coords(self, grid, localz=False) -> Iterator[tuple]: for sd in self.subdivisiondata: for nd in self.nodedata: - for rpt in get_release_points( - sd, grid, nn=int(nd[0]), localz=localz - ): + for rpt in get_release_points(sd, grid, nn=int(nd[0]), localz=localz): yield (*rpt[1:4],) def to_prp(self, grid, localz=False) -> Iterator[tuple]: diff --git a/flopy/modpath/mp7particlegroup.py b/flopy/modpath/mp7particlegroup.py index 5148138536..1b1ff820e7 100644 --- a/flopy/modpath/mp7particlegroup.py +++ b/flopy/modpath/mp7particlegroup.py @@ -86,9 +86,7 @@ def __init__(self, particlegroupname, filename, releasedata): releasetimecount = int(releasedata[0]) releaseinterval = 0 # convert releasetimes list or tuple to a numpy array - if isinstance(releasedata[1], list) or isinstance( - releasedata[1], tuple - ): + if isinstance(releasedata[1], list) or isinstance(releasedata[1], tuple): releasedata[1] = np.array(releasedata[1]) if releasedata[1].shape[0] != releasetimecount: raise ValueError( @@ -154,9 +152,7 @@ def write(self, fp=None, ws="."): fp.write(f"{self.releasetimecount}\n") # 
item 31 tp = self.releasetimes - v = Util2d( - self, (tp.shape[0],), np.float32, tp, name="temp", locat=0 - ) + v = Util2d(self, (tp.shape[0],), np.float32, tp, name="temp", locat=0) fp.write(v.string) # item 32 @@ -220,9 +216,7 @@ def __init__( """ # instantiate base class - _Modpath7ParticleGroup.__init__( - self, particlegroupname, filename, releasedata - ) + _Modpath7ParticleGroup.__init__(self, particlegroupname, filename, releasedata) self.name = "ParticleGroup" # create default node-based particle data if not passed @@ -305,9 +299,7 @@ def __init__(self, particlegroupname, filename, releasedata): """ # instantiate base class - _Modpath7ParticleGroup.__init__( - self, particlegroupname, filename, releasedata - ) + _Modpath7ParticleGroup.__init__(self, particlegroupname, filename, releasedata) def write(self, fp=None, ws="."): """ @@ -370,9 +362,7 @@ def __init__( self.name = "ParticleGroupLRCTemplate" # instantiate base class - _ParticleGroupTemplate.__init__( - self, particlegroupname, filename, releasedata - ) + _ParticleGroupTemplate.__init__(self, particlegroupname, filename, releasedata) # validate particledata if particledata is None: particledata = NodeParticleData() @@ -468,9 +458,7 @@ def __init__( self.name = "ParticleGroupNodeTemplate" # instantiate base class - _ParticleGroupTemplate.__init__( - self, particlegroupname, filename, releasedata - ) + _ParticleGroupTemplate.__init__(self, particlegroupname, filename, releasedata) # validate particledata if particledata is None: particledata = NodeParticleData() diff --git a/flopy/modpath/mp7sim.py b/flopy/modpath/mp7sim.py index 0a561f2e6e..3a5e883fde 100644 --- a/flopy/modpath/mp7sim.py +++ b/flopy/modpath/mp7sim.py @@ -315,9 +315,7 @@ def __init__( except: sim_enum_error("weaksourceoption", weaksourceoption, weakOpt) try: - self.budgetoutputoption = budgetOpt[ - budgetoutputoption.lower() - ].value + self.budgetoutputoption = budgetOpt[budgetoutputoption.lower()].value except: sim_enum_error("budgetoutputoption", budgetoutputoption, budgetOpt) # tracemode @@ -520,9 +518,7 @@ def __init__( ) self.stopzone = stopzone if zones is None: - raise ValueError( - "zones must be specified if zonedataoption='on'." - ) + raise ValueError("zones must be specified if zonedataoption='on'.") self.zones = Util3d( model, shape3d, @@ -538,14 +534,11 @@ def __init__( retardationfactoroption.lower() ].value except: - sim_enum_error( - "retardationfactoroption", retardationfactoroption, onoffOpt - ) + sim_enum_error("retardationfactoroption", retardationfactoroption, onoffOpt) if self.retardationfactoroption == 2: if retardation is None: raise ValueError( - "retardation must be specified if " - "retardationfactoroption='on'." + "retardation must be specified if retardationfactoroption='on'." 
) self.retardation = Util3d( model, @@ -615,9 +608,7 @@ def write_file(self, check=False): # item 7 and 8 if self.tracemode == 1: f.write(f"{self.tracefilename}\n") - f.write( - f"{self.traceparticlegroup + 1} {self.traceparticleid + 1}\n" - ) + f.write(f"{self.traceparticlegroup + 1} {self.traceparticleid + 1}\n") # item 9 f.write(f"{self.BudgetCellCount}\n") # item 10 @@ -657,9 +648,7 @@ def write_file(self, check=False): f.write(f"{self.timepointoption}\n") if self.timepointoption == 1: # item 17 - f.write( - f"{self.timepointdata[0]} {self.timepointdata[1][0]}\n" - ) + f.write(f"{self.timepointdata[0]} {self.timepointdata[1][0]}\n") elif self.timepointoption == 2: # item 18 f.write(f"{self.timepointdata[0]}\n") diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index 45599aceeb..522a6190ca 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -147,9 +147,7 @@ def __init__( # Check whether specified ftlfile exists in model directory; if not, # warn user - if os.path.isfile( - os.path.join(self.model_ws, f"{modelname}.{namefile_ext}") - ): + if os.path.isfile(os.path.join(self.model_ws, f"{modelname}.{namefile_ext}")): with open( os.path.join(self.model_ws, f"{modelname}.{namefile_ext}") ) as nm_file: @@ -180,10 +178,7 @@ def __init__( ): pass else: - print( - "Specified value of ftlfree conflicts with FTL " - "file format" - ) + print("Specified value of ftlfree conflicts with FTL file format") print( f"Switching ftlfree from {self.ftlfree} to {not self.ftlfree}" ) @@ -396,9 +391,7 @@ def write_name_file(self): ftlfmt = "" if self.ftlfree: ftlfmt = "FREE" - f_nam.write( - f"{'FTL':14s} {self.ftlunit:5d} {self.ftlfilename} {ftlfmt}\n" - ) + f_nam.write(f"{'FTL':14s} {self.ftlunit:5d} {self.ftlfilename} {ftlfmt}\n") # write file entries in name file f_nam.write(str(self.get_name_file_entries())) @@ -407,9 +400,7 @@ def write_name_file(self): f_nam.write(f"DATA {u:5d} {f}\n") # write the output files - for u, f, b in zip( - self.output_units, self.output_fnames, self.output_binflag - ): + for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag): if u == 0: continue if b: @@ -504,9 +495,7 @@ def load( namefile_path, mt.mfnam_packages, verbose=verbose ) except Exception as e: - raise Exception( - f"error loading name file entries from file:\n{e!s}" - ) + raise Exception(f"error loading name file entries from file:\n{e!s}") if mt.verbose: print( @@ -552,9 +541,7 @@ def load( return None try: - pck = btn.package.load( - btn.filename, mt, ext_unit_dict=ext_unit_dict - ) + pck = btn.package.load(btn.filename, mt, ext_unit_dict=ext_unit_dict) except Exception as e: raise Exception(f"error loading BTN: {e!s}") files_successfully_loaded.append(btn.filename) @@ -608,9 +595,7 @@ def load( ) files_successfully_loaded.append(item.filename) if mt.verbose: - print( - f" {pck.name[0]:4s} package load...success" - ) + print(f" {pck.name[0]:4s} package load...success") except BaseException as o: if mt.verbose: print( @@ -624,9 +609,7 @@ def load( ) files_successfully_loaded.append(item.filename) if mt.verbose: - print( - f" {pck.name[0]:4s} package load...success" - ) + print(f" {pck.name[0]:4s} package load...success") else: if mt.verbose: print(f" {item.filetype:4s} package load...skipped") @@ -651,9 +634,7 @@ def load( elif key not in mt.pop_key_list: mt.external_fnames.append(item.filename) mt.external_units.append(key) - mt.external_binflag.append( - "binary" in item.filetype.lower() - ) + mt.external_binflag.append("binary" in item.filetype.lower()) 
mt.external_output.append(False) # pop binary output keys and any external file units that are now @@ -674,8 +655,9 @@ def load( # write message indicating packages that were successfully loaded if mt.verbose: print( - "\n The following {} packages were " - "successfully loaded.".format(len(files_successfully_loaded)) + "\n The following {} packages were successfully loaded.".format( + len(files_successfully_loaded) + ) ) for fname in files_successfully_loaded: print(f" {os.path.basename(fname)}") @@ -738,7 +720,9 @@ def load_obs(fname): r : np.ndarray """ - firstline = "STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)" + firstline = ( + "STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)" + ) dtype = [("step", int), ("time", float)] nobs = 0 obs = [] diff --git a/flopy/mt3d/mtadv.py b/flopy/mt3d/mtadv.py index 5227a75f36..a4704f1f12 100644 --- a/flopy/mt3d/mtadv.py +++ b/flopy/mt3d/mtadv.py @@ -232,8 +232,7 @@ def write_file(self): """ f_adv = open(self.fn_path, "w") f_adv.write( - "%10i%10f%10i%10i\n" - % (self.mixelm, self.percel, self.mxpart, self.nadvfd) + "%10i%10f%10i%10i\n" % (self.mixelm, self.percel, self.mxpart, self.nadvfd) ) if self.mixelm > 0: f_adv.write("%10i%10f\n" % (self.itrack, self.wd)) @@ -250,9 +249,7 @@ def write_file(self): ) ) if (self.mixelm == 2) or (self.mixelm == 3): - f_adv.write( - "%10i%10i%10i\n" % (self.interp, self.nlsink, self.npsink) - ) + f_adv.write("%10i%10i%10i\n" % (self.interp, self.nlsink, self.npsink)) if self.mixelm == 3: f_adv.write("%10f\n" % (self.dchmoc)) f_adv.close() diff --git a/flopy/mt3d/mtbtn.py b/flopy/mt3d/mtbtn.py index 1ddc994170..e9697ee592 100644 --- a/flopy/mt3d/mtbtn.py +++ b/flopy/mt3d/mtbtn.py @@ -294,9 +294,7 @@ def __init__( if isinstance(obs, list): obs = np.array(obs) if obs.ndim != 2: - raise Exception( - "obs must be (or be convertible to) a 2d array" - ) + raise Exception("obs must be (or be convertible to) a 2d array") self.obs = obs self.nprobs = nprobs self.chkmas = chkmas @@ -331,15 +329,9 @@ def __init__( name="dt0", array_free_format=False, ) - self.mxstrn = Util2d( - model, (self.nper,), np.int32, mxstrn, name="mxstrn" - ) - self.ttsmult = Util2d( - model, (self.nper,), np.float32, ttsmult, name="ttmult" - ) - self.ttsmax = Util2d( - model, (self.nper,), np.float32, ttsmax, name="ttsmax" - ) + self.mxstrn = Util2d(model, (self.nper,), np.int32, mxstrn, name="mxstrn") + self.ttsmult = Util2d(model, (self.nper,), np.float32, ttsmult, name="ttmult") + self.ttsmax = Util2d(model, (self.nper,), np.float32, ttsmax, name="ttsmax") # Do some fancy stuff for multi-species concentrations self.sconc = [] @@ -677,9 +669,7 @@ def write_file(self): # A3; Keywords # Build a string of the active keywords - if ( - self.parent.version == "mt3d-usgs" - ): # Keywords not supported by MT3Dms + if self.parent.version == "mt3d-usgs": # Keywords not supported by MT3Dms str1 = "" if self.MFStyleArr: str1 += " MODFLOWSTYLEARRAYS" diff --git a/flopy/mt3d/mtdsp.py b/flopy/mt3d/mtdsp.py index 84442e0b94..afe9375af1 100644 --- a/flopy/mt3d/mtdsp.py +++ b/flopy/mt3d/mtdsp.py @@ -225,8 +225,7 @@ def __init__( if len(list(kwargs.keys())) > 0: raise Exception( - "DSP error: unrecognized kwargs: " - + " ".join(list(kwargs.keys())) + "DSP error: unrecognized kwargs: " + " ".join(list(kwargs.keys())) ) self.parent.add_package(self) return @@ -271,9 +270,7 @@ def write_file(self): return @classmethod - def load( - cls, f, model, nlay=None, nrow=None, ncol=None, ext_unit_dict=None - ): + def load(cls, f, model, nlay=None, nrow=None, 
ncol=None, ext_unit_dict=None): """ Load an existing package. diff --git a/flopy/mt3d/mtlkt.py b/flopy/mt3d/mtlkt.py index 749744b8d0..a9f9a5f10e 100644 --- a/flopy/mt3d/mtlkt.py +++ b/flopy/mt3d/mtlkt.py @@ -222,8 +222,7 @@ def __init__( # Check to make sure that all kwargs have been consumed if len(list(kwargs.keys())) > 0: raise Exception( - "LKT error: unrecognized kwargs: " - + " ".join(list(kwargs.keys())) + "LKT error: unrecognized kwargs: " + " ".join(list(kwargs.keys())) ) self.parent.add_package(self) @@ -265,9 +264,7 @@ def write_file(self): # (Evap, precip, specified runoff into the lake, specified # withdrawal directly from the lake if self.lk_stress_period_data is not None: - self.lk_stress_period_data.write_transient( - f_lkt, single_per=kper - ) + self.lk_stress_period_data.write_transient(f_lkt, single_per=kper) else: f_lkt.write("0\n") @@ -275,9 +272,7 @@ def write_file(self): return @classmethod - def load( - cls, f, model, nlak=None, nper=None, ncomp=None, ext_unit_dict=None - ): + def load(cls, f, model, nlak=None, nper=None, ncomp=None, ext_unit_dict=None): """ Load an existing package. @@ -366,23 +361,15 @@ def load( " Mass does not exit the model via simulated lake evaporation " ) else: - print( - " Mass exits the lake via simulated lake evaporation " - ) + print(" Mass exits the lake via simulated lake evaporation ") # Item 2 (COLDLAK - Initial concentration in this instance) if model.verbose: print(" loading initial concentration (COLDLAK) ") if model.free_format: - print( - " Using MODFLOW style array reader utilities to " - "read COLDLAK" - ) + print(" Using MODFLOW style array reader utilities to read COLDLAK") elif model.array_format == "mt3d": - print( - " Using historic MT3DMS array reader utilities to " - "read COLDLAK" - ) + print(" Using historic MT3DMS array reader utilities to read COLDLAK") kwargs = {} coldlak = Util2d.load( @@ -419,9 +406,7 @@ def load( for iper in range(nper): if model.verbose: - print( - f" loading lkt boundary condition data for kper {iper + 1:5d}" - ) + print(f" loading lkt boundary condition data for kper {iper + 1:5d}") # Item 3: NTMP: An integer value corresponding to the number of # specified lake boundary conditions to follow. 
@@ -453,9 +438,7 @@ def load( if cbclk > 0: for ilkvar in range(cbclk): t.append(m_arr[ilkvar + 2]) - current_lk[ilkbnd] = tuple( - t[: len(current_lk.dtype.names)] - ) + current_lk[ilkbnd] = tuple(t[: len(current_lk.dtype.names)]) # Convert ILKBC (node) index to zero-based current_lk["node"] -= 1 current_lk = current_lk.view(np.recarray) @@ -478,9 +461,7 @@ def load( ext_unit_dict, filetype=Mt3dLkt._ftype() ) if icbclk > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=icbclk - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=icbclk) model.add_pop_key_list(icbclk) # Construct and return LKT package diff --git a/flopy/mt3d/mtrct.py b/flopy/mt3d/mtrct.py index 93cbf317c9..b4e008f909 100644 --- a/flopy/mt3d/mtrct.py +++ b/flopy/mt3d/mtrct.py @@ -409,8 +409,7 @@ def __init__( # Check to make sure that all kwargs have been consumed if len(list(kwargs.keys())) > 0: raise Exception( - "RCT error: unrecognized kwargs: " - + " ".join(list(kwargs.keys())) + "RCT error: unrecognized kwargs: " + " ".join(list(kwargs.keys())) ) self.parent.add_package(self) @@ -431,8 +430,7 @@ def write_file(self): # Open file for writing f_rct = open(self.fn_path, "w") f_rct.write( - "%10i%10i%10i%10i\n" - % (self.isothm, self.ireact, self.irctop, self.igetsc) + "%10i%10i%10i%10i\n" % (self.isothm, self.ireact, self.irctop, self.igetsc) ) if self.isothm in [1, 2, 3, 4, 6]: f_rct.write(self.rhob.get_file_entry()) diff --git a/flopy/mt3d/mtsft.py b/flopy/mt3d/mtsft.py index 1aeade5b9a..c66fd613ce 100644 --- a/flopy/mt3d/mtsft.py +++ b/flopy/mt3d/mtsft.py @@ -445,9 +445,7 @@ def write_file(self): return @classmethod - def load( - cls, f, model, nsfinit=None, nper=None, ncomp=None, ext_unit_dict=None - ): + def load(cls, f, model, nsfinit=None, nper=None, ncomp=None, ext_unit_dict=None): """ Load an existing package. 
@@ -606,15 +604,9 @@ def load( print(" loading COLDSF...") if model.free_format: - print( - " Using MODFLOW style array reader utilities to " - "read COLDSF" - ) + print(" Using MODFLOW style array reader utilities to read COLDSF") elif model.array_format == "mt3d": - print( - " Using historic MT3DMS array reader utilities to " - "read COLDSF" - ) + print(" Using historic MT3DMS array reader utilities to read COLDSF") coldsf = Util2d.load( f, @@ -646,15 +638,9 @@ def load( # Item 4 (DISPSF(NRCH)) Reach-by-reach dispersion if model.verbose: if model.free_format: - print( - " Using MODFLOW style array reader utilities to " - "read DISPSF" - ) + print(" Using MODFLOW style array reader utilities to read DISPSF") elif model.array_format == "mt3d": - print( - " Using historic MT3DMS array reader utilities to " - "read DISPSF" - ) + print(" Using historic MT3DMS array reader utilities to read DISPSF") dispsf = Util2d.load( f, diff --git a/flopy/mt3d/mtssm.py b/flopy/mt3d/mtssm.py index 8887c57cdb..e0af06e1a4 100644 --- a/flopy/mt3d/mtssm.py +++ b/flopy/mt3d/mtssm.py @@ -242,9 +242,7 @@ def __init__( if self.stress_period_data is not None: for i in range(nper): if i in self.stress_period_data.data: - mxss_kper += np.sum( - self.stress_period_data.data[i].itype == -1 - ) + mxss_kper += np.sum(self.stress_period_data.data[i].itype == -1) mxss_kper += np.sum( self.stress_period_data.data[i].itype == -15 ) @@ -307,12 +305,8 @@ def __init__( self.cevt = None try: - if cevt is None and ( - model.mf.evt is not None or model.mf.ets is not None - ): - print( - "found 'ets'/'evt' in modflow model, resetting cevt to 0.0" - ) + if cevt is None and (model.mf.evt is not None or model.mf.ets is not None): + print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0") cevt = 0.0 except: if model.verbose: @@ -355,8 +349,7 @@ def __init__( if len(list(kwargs.keys())) > 0: raise Exception( - "SSM error: unrecognized kwargs: " - + " ".join(list(kwargs.keys())) + "SSM error: unrecognized kwargs: " + " ".join(list(kwargs.keys())) ) # Add self to parent and return @@ -548,9 +541,7 @@ def load( # Item D1: Dummy input line - line already read above if model.verbose: - print( - " loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)..." - ) + print(" loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)...") fwel = line[0:2] fdrn = line[2:4] frch = line[4:6] @@ -728,10 +719,7 @@ def load( # Item D8: KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP) if model.verbose: - print( - " loading KSS, ISS, JSS, CSS, ITYPE, " - "(CSSMS(n),n=1,NCOMP)..." 
- ) + print(" loading KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP)...") if nss > 0: current = np.empty((nss), dtype=dtype) for ibnd in range(nss): diff --git a/flopy/mt3d/mttob.py b/flopy/mt3d/mttob.py index 1d7927e489..6631ff2327 100644 --- a/flopy/mt3d/mttob.py +++ b/flopy/mt3d/mttob.py @@ -67,15 +67,11 @@ def write_file(self): MaxFluxCells = MaxFluxCells + len(FluxGroup[1]) MaxFluxObs = MaxFluxObs + 1 f_tob.write("%10d%10d%10d\n" % (MaxConcObs, MaxFluxObs, MaxFluxCells)) - f_tob.write( - "%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs) - ) + f_tob.write("%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs)) if inFluxObs: nFluxGroup = len(self.FluxGroups) - f_tob.write( - "%10d%10f%10d\n" % (nFluxGroup, self.FScale, self.iOutFlux) - ) + f_tob.write("%10d%10f%10d\n" % (nFluxGroup, self.FScale, self.iOutFlux)) for FluxGroup in self.FluxGroups: nFluxTimeObs, FluxTimeObs = self.assign_layer_row_column_data( FluxGroup[0], 5, zerobase=False @@ -94,9 +90,7 @@ def write_file(self): ) for c in Cells: c = c[0] # Still to fix this! - f_tob.write( - "%10d%10d%10d%10f\n" % (c[0], c[1], c[2], c[3]) - ) + f_tob.write("%10d%10d%10d%10f\n" % (c[0], c[1], c[2], c[3])) f_tob.close() return diff --git a/flopy/mt3d/mtuzt.py b/flopy/mt3d/mtuzt.py index e979aa9472..568d0d2423 100644 --- a/flopy/mt3d/mtuzt.py +++ b/flopy/mt3d/mtuzt.py @@ -352,9 +352,7 @@ def write_file(self): incuzinf = max(incuzinf, incuzinficomp) if incuzinf == 1: break - f_uzt.write( - f"{incuzinf:10d} # INCUZINF - SP {kper + 1:5d}\n" - ) + f_uzt.write(f"{incuzinf:10d} # INCUZINF - SP {kper + 1:5d}\n") if incuzinf == 1: for t2d in self.cuzinf: u2d = t2d[kper] @@ -497,9 +495,7 @@ def load( cuzinf = None # At least one species being simulated, so set up a place holder - t2d = Transient2d( - model, (nrow, ncol), np.float32, 0.0, name="cuzinf", locat=0 - ) + t2d = Transient2d(model, (nrow, ncol), np.float32, 0.0, name="cuzinf", locat=0) cuzinf = {0: t2d} if ncomp > 1: for icomp in range(2, ncomp + 1): @@ -726,9 +722,7 @@ def load( ext_unit_dict, filetype=Mt3dUzt._ftype() ) if icbcuz > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=icbcuz - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=icbcuz) model.add_pop_key_list(icbcuz) # Construct and return uzt package diff --git a/flopy/pakbase.py b/flopy/pakbase.py index bf153da5d6..24233a1a36 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -211,18 +211,14 @@ def _get_kparams(self): kparams[kp] = name if "hk" in self.__dict__: if self.hk.shape[1] is None: - hk = np.asarray( - [a.array.flatten() for a in self.hk], dtype=object - ) + hk = np.asarray([a.array.flatten() for a in self.hk], dtype=object) else: hk = self.hk.array.copy() else: hk = self.k.array.copy() if "vka" in self.__dict__ and self.layvka.sum() > 0: if self.vka.shape[1] is None: - vka = np.asarray( - [a.array.flatten() for a in self.vka], dtype=object - ) + vka = np.asarray([a.array.flatten() for a in self.vka], dtype=object) else: vka = self.vka.array vka_param = kparams.pop("vka") @@ -330,9 +326,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): ): chk = self._check_oc(f, verbose, level, checktype) # check property values in upw and lpf packages - elif self.name[0] in ["UPW", "LPF"] or self.package_type.upper() in [ - "NPF" - ]: + elif self.name[0] in ["UPW", "LPF"] or self.package_type.upper() in ["NPF"]: chk = self._check_flowp(f, verbose, level, checktype) elif self.package_type.upper() in ["STO"]: chk = self._get_check(f, verbose, 
level, checktype) @@ -417,19 +411,14 @@ def _check_storage(self, chk, storage_coeff): else: iconvert = self.iconvert.array inds = np.array( - [ - True if l > 0 or l < 0 else False - for l in iconvert.flatten() - ] + [True if l > 0 or l < 0 else False for l in iconvert.flatten()] ) if not inds.any(): skip_sy_check = True for ishape in np.ndindex(active.shape): if active[ishape]: - active[ishape] = ( - iconvert[ishape] > 0 or iconvert[ishape] < 0 - ) + active[ishape] = iconvert[ishape] > 0 or iconvert[ishape] < 0 if not skip_sy_check: chk.values( sarrays["sy"], @@ -528,21 +517,18 @@ def __getitem__(self, item): spd = getattr(self, "stress_period_data") if isinstance(item, MfList): if not isinstance(item, list) and not isinstance(item, tuple): - msg = ( - f"package.__getitem__() kper {item} not in data.keys()" - ) + msg = f"package.__getitem__() kper {item} not in data.keys()" assert item in list(spd.data.keys()), msg return spd[item] if item[1] not in self.dtype.names: raise Exception( - "package.__getitem(): item {} not in dtype names " - "{}".format(item, self.dtype.names) + "package.__getitem(): item {} not in dtype names {}".format( + item, self.dtype.names + ) ) - msg = ( - f"package.__getitem__() kper {item[0]} not in data.keys()" - ) + msg = f"package.__getitem__() kper {item[0]} not in data.keys()" assert item[0] in list(spd.data.keys()), msg if spd.vtype[item[0]] == np.recarray: @@ -925,9 +911,7 @@ def load( if nppak > 0: mxl = int(t[2]) if model.verbose: - print( - f" Parameters detected. Number of parameters = {nppak}" - ) + print(f" Parameters detected. Number of parameters = {nppak}") line = f.readline() # dataset 2a @@ -950,9 +934,7 @@ def load( mxl = int(t[3]) imax += 1 if model.verbose: - print( - f" Parameters detected. Number of parameters = {nppak}" - ) + print(f" Parameters detected. Number of parameters = {nppak}") options = [] aux_names = [] @@ -1024,9 +1006,7 @@ def load( dt = pak_type.get_empty( 1, aux_names=aux_names, structured=model.structured ).dtype - pak_parms = mfparbc.load( - f, nppak, dt, model, ext_unit_dict, model.verbose - ) + pak_parms = mfparbc.load(f, nppak, dt, model, ext_unit_dict, model.verbose) if nper is None: nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() @@ -1070,9 +1050,7 @@ def load( current = pak_type.get_empty( itmp, aux_names=aux_names, structured=model.structured ) - current = ulstrd( - f, itmp, current, model, sfac_columns, ext_unit_dict - ) + current = ulstrd(f, itmp, current, model, sfac_columns, ext_unit_dict) if model.structured: current["k"] -= 1 current["i"] -= 1 @@ -1126,16 +1104,12 @@ def load( iname = "static" except: if model.verbose: - print( - f" implicit static instance for parameter {pname}" - ) + print(f" implicit static instance for parameter {pname}") par_dict, current_dict = pak_parms.get(pname) data_dict = current_dict[iname] - par_current = pak_type.get_empty( - par_dict["nlst"], aux_names=aux_names - ) + par_current = pak_type.get_empty(par_dict["nlst"], aux_names=aux_names) # get appropriate parval if model.mfpar.pval is None: @@ -1149,9 +1123,7 @@ def load( # fill current parameter data (par_current) for ibnd, t in enumerate(data_dict): t = tuple(t) - par_current[ibnd] = tuple( - t[: len(par_current.dtype.names)] - ) + par_current[ibnd] = tuple(t[: len(par_current.dtype.names)]) if model.structured: par_current["k"] -= 1 @@ -1196,9 +1168,7 @@ def load( ext_unit_dict, filetype=pak_type._ftype() ) if ipakcb > 0: - iu, filenames[1] = model.get_ext_dict_attr( - ext_unit_dict, unit=ipakcb - ) + iu, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) if "mfusgwel" in pak_type_str:
diff --git a/flopy/plot/crosssection.py b/flopy/plot/crosssection.py index 19a9e3fc13..7229fddb3c 100644 --- a/flopy/plot/crosssection.py +++ b/flopy/plot/crosssection.py @@ -111,9 +111,7 @@ def __init__( ( xverts, yverts, - ) = plotutil.UnstructuredPlotUtilities.irregular_shape_patch( - xverts, yverts - ) + ) = plotutil.UnstructuredPlotUtilities.irregular_shape_patch(xverts, yverts) self.xvertices, self.yvertices = geometry.transform( xverts, @@ -244,9 +242,7 @@ def __init__( else: self.active = np.ones(self.mg.nlay, dtype=int) - self._nlay, self._ncpl, self.ncb = self.mg.cross_section_lay_ncpl_ncb( - self.ncb - ) + self._nlay, self._ncpl, self.ncb = self.mg.cross_section_lay_ncpl_ncb(self.ncb) top = self.mg.top.reshape(1, self._ncpl) botm = self.mg.botm.reshape(self._nlay + self.ncb, self._ncpl) @@ -350,9 +346,7 @@ def polygons(self): if cell not in self._polygons: self._polygons[cell] = [Polygon(verts, closed=True)] else: - self._polygons[cell].append( - Polygon(verts, closed=True) - ) + self._polygons[cell].append(Polygon(verts, closed=True)) return copy.copy(self._polygons) @@ -497,9 +491,7 @@ def plot_surface(self, a, masked_values=None, **kwargs): elif a[cell] is np.ma.masked: continue else: - line = ax.plot( - d[cell], [a[cell], a[cell]], color=color, **kwargs - ) + line = ax.plot(d[cell], [a[cell], a[cell]], color=color, **kwargs) surface.append(line) ax = self._set_axes_limits(ax) @@ -555,9 +547,7 @@ def plot_fill_between( else: projpts = self.projpts - pc = self.get_grid_patch_collection( - a, projpts, fill_between=True, **kwargs - ) + pc = self.get_grid_patch_collection(a, projpts, fill_between=True, **kwargs) if pc is not None: ax.add_collection(pc) ax = 
self._set_axes_limits(ax) @@ -613,10 +603,7 @@ def contour_array(self, a, masked_values=None, head=None, **kwargs): zcenters = self.set_zcentergrid(np.ravel(head)) else: zcenters = np.array( - [ - np.mean(np.array(v).T[1]) - for i, v in sorted(self.projpts.items()) - ] + [np.mean(np.array(v).T[1]) for i, v in sorted(self.projpts.items())] ) # work around for tri-contour ignore vmin & vmax @@ -666,13 +653,9 @@ def contour_array(self, a, masked_values=None, head=None, **kwargs): if mplcontour: plotarray = np.ma.masked_array(plotarray, ismasked) if filled: - contour_set = ax.contourf( - xcenters, zcenters, plotarray, **kwargs - ) + contour_set = ax.contourf(xcenters, zcenters, plotarray, **kwargs) else: - contour_set = ax.contour( - xcenters, zcenters, plotarray, **kwargs - ) + contour_set = ax.contour(xcenters, zcenters, plotarray, **kwargs) else: triang = tri.Triangulation(xcenters, zcenters) analyze = tri.TriAnalyzer(triang) @@ -783,9 +766,7 @@ def plot_ibound( plotarray[idx1] = 1 plotarray[idx2] = 2 plotarray = np.ma.masked_equal(plotarray, 0) - cmap = matplotlib.colors.ListedColormap( - ["none", color_noflow, color_ch] - ) + cmap = matplotlib.colors.ListedColormap(["none", color_noflow, color_ch]) bounds = [0, 1, 2, 3] norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) # mask active cells @@ -820,9 +801,7 @@ def plot_grid(self, **kwargs): ax.add_collection(col) return col - def plot_bc( - self, name=None, package=None, kper=0, color=None, head=None, **kwargs - ): + def plot_bc(self, name=None, package=None, kper=0, color=None, head=None, **kwargs): """ Plot boundary conditions locations for a specific boundary type from a flopy model @@ -878,15 +857,11 @@ def plot_bc( try: mflist = pp.stress_period_data.array[kper] except Exception as e: - raise Exception( - f"Not a list-style boundary package: {e!s}" - ) + raise Exception(f"Not a list-style boundary package: {e!s}") if mflist is None: return - t = np.array( - [list(i) for i in mflist["cellid"]], dtype=int - ).T + t = np.array([list(i) for i in mflist["cellid"]], dtype=int).T if len(idx) == 0: idx = np.copy(t) @@ -901,9 +876,7 @@ def plot_bc( try: mflist = p.stress_period_data[kper] except Exception as e: - raise Exception( - f"Not a list-style boundary package: {e!s}" - ) + raise Exception(f"Not a list-style boundary package: {e!s}") if mflist is None: return if len(self.mg.shape) == 3: @@ -912,9 +885,7 @@ def plot_bc( idx = mflist["node"] if len(self.mg.shape) == 3: - plotarray = np.zeros( - (self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=int - ) + plotarray = np.zeros((self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=int) plotarray[idx[0], idx[1], idx[2]] = 1 elif len(self.mg.shape) == 2: plotarray = np.zeros((self._nlay, self._ncpl), dtype=int) @@ -1110,19 +1081,16 @@ def plot_vector( arbitrary = False pts = self.pts xuniform = [ - True if abs(pts.T[0, 0] - i) < self.mean_dy else False - for i in pts.T[0] + True if abs(pts.T[0, 0] - i) < self.mean_dy else False for i in pts.T[0] ] yuniform = [ - True if abs(pts.T[1, 0] - i) < self.mean_dx else False - for i in pts.T[1] + True if abs(pts.T[1, 0] - i) < self.mean_dx else False for i in pts.T[1] ] if not np.all(xuniform) and not np.all(yuniform): arbitrary = True if arbitrary: err_msg = ( - "plot_specific_discharge() does not " - "support arbitrary cross-sections" + "plot_specific_discharge() does not support arbitrary cross-sections" ) raise AssertionError(err_msg) @@ -1150,9 +1118,7 @@ def plot_vector( zcenters = self.set_zcentergrid(np.ravel(head), kstep=kstep) else: - zcenters 
= [ - np.mean(np.array(v).T[1]) for i, v in sorted(projpts.items()) - ] + zcenters = [np.mean(np.array(v).T[1]) for i, v in sorted(projpts.items())] xcenters = np.array( [np.mean(np.array(v).T[0]) for i, v in sorted(projpts.items())] @@ -1193,9 +1159,7 @@ def plot_vector( return quiver - def plot_pathline( - self, pl, travel_time=None, method="cell", head=None, **kwargs - ): + def plot_pathline(self, pl, travel_time=None, method="cell", head=None, **kwargs): """ Plot particle pathlines. Compatible with MODFLOW 6 PRT particle track data format, or MODPATH 6 or 7 pathline data format. @@ -1335,9 +1299,7 @@ def plot_pathline( return lc - def plot_timeseries( - self, ts, travel_time=None, method="cell", head=None, **kwargs - ): + def plot_timeseries(self, ts, travel_time=None, method="cell", head=None, **kwargs): """ Plot the MODPATH timeseries. Not compatible with MODFLOW 6 PRT. @@ -1532,9 +1494,7 @@ def get_grid_line_collection(self, **kwargs): facecolor = kwargs.pop("facecolor", "none") facecolor = kwargs.pop("fc", facecolor) - polygons = [ - p for _, polys in sorted(self.polygons.items()) for p in polys - ] + polygons = [p for _, polys in sorted(self.polygons.items()) for p in polys] if len(polygons) > 0: patches = PatchCollection( polygons, edgecolor=edgecolor, facecolor=facecolor, **kwargs @@ -1771,9 +1731,7 @@ def get_grid_patch_collection( data.append(plotarray[cell]) if len(rectcol) > 0: - patches = PatchCollection( - rectcol, match_original=match_original, **kwargs - ) + patches = PatchCollection(rectcol, match_original=match_original, **kwargs) if not fill_between: patches.set_array(np.array(data)) patches.set_clim(vmin, vmax) diff --git a/flopy/plot/map.py b/flopy/plot/map.py index d469d0dbef..49a53b38b9 100644 --- a/flopy/plot/map.py +++ b/flopy/plot/map.py @@ -42,9 +42,7 @@ class PlotMapView: """ - def __init__( - self, model=None, modelgrid=None, ax=None, layer=0, extent=None - ): + def __init__(self, model=None, modelgrid=None, ax=None, layer=0, extent=None): self.model = model self.layer = layer self.mg = None @@ -149,9 +147,7 @@ def plot_array(self, a, masked_values=None, **kwargs): return if not isinstance(polygons[0], Path): - collection = ax.pcolormesh( - self.mg.xvertices, self.mg.yvertices, plotarray - ) + collection = ax.pcolormesh(self.mg.xvertices, self.mg.yvertices, plotarray) else: plotarray = plotarray.ravel() @@ -506,15 +502,11 @@ def plot_bc( try: mflist = pp.stress_period_data.array[kper] except Exception as e: - raise Exception( - f"Not a list-style boundary package: {e!s}" - ) + raise Exception(f"Not a list-style boundary package: {e!s}") if mflist is None: return - t = np.array( - [list(i) for i in mflist["cellid"]], dtype=int - ).T + t = np.array([list(i) for i in mflist["cellid"]], dtype=int).T if len(idx) == 0: idx = np.copy(t) @@ -529,9 +521,7 @@ def plot_bc( try: mflist = p.stress_period_data[kper] except Exception as e: - raise Exception( - f"Not a list-style boundary package: {e!s}" - ) + raise Exception(f"Not a list-style boundary package: {e!s}") if mflist is None: return if len(self.mg.shape) == 3: @@ -655,9 +645,7 @@ def plot_centers( xcenters = self.mg.get_xcellcenters_for_layer(self.layer).ravel() ycenters = self.mg.get_ycellcenters_for_layer(self.layer).ravel() - idomain = self.mg.get_plottable_layer_array( - self.mg.idomain, self.layer - ).ravel() + idomain = self.mg.get_plottable_layer_array(self.mg.idomain, self.layer).ravel() active_ixs = list(range(len(xcenters))) if not inactive: diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py 
index 3dcd4ebb4d..09e5f8a6e7 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -841,8 +841,7 @@ def _plot_util3d_helper( name = [name] * nplottable_layers names = [ - f"{model_name}{name[k]} layer {k + 1}" - for k in range(nplottable_layers) + f"{model_name}{name[k]} layer {k + 1}" for k in range(nplottable_layers) ] filenames = None @@ -988,9 +987,7 @@ def _plot_transient2d_helper( return axes @staticmethod - def _plot_scalar_helper( - scalar, filename_base=None, file_extension=None, **kwargs - ): + def _plot_scalar_helper(scalar, filename_base=None, file_extension=None, **kwargs): """ Helper method to plot scalar objects @@ -1153,9 +1150,7 @@ def _plot_array_helper( for idx, k in enumerate(range(i0, i1)): fig = plt.figure(num=fignum[idx]) - pmv = PlotMapView( - ax=axes[idx], model=model, modelgrid=modelgrid, layer=k - ) + pmv = PlotMapView(ax=axes[idx], model=model, modelgrid=modelgrid, layer=k) if defaults["pcolor"]: cm = pmv.plot_array( plotarray, @@ -1661,9 +1656,7 @@ def line_intersect_grid(ptsin, xgrid, ygrid): yc = y[cell] verts = [ (xt, yt) - for xt, yt in zip( - xc[cell_vertex_ix[iix]], yc[cell_vertex_ix[iix]] - ) + for xt, yt in zip(xc[cell_vertex_ix[iix]], yc[cell_vertex_ix[iix]]) ] if cell in vdict: @@ -1915,9 +1908,7 @@ def calc_conc(self, zeta, layer=None): pct = {} for isrf in range(self.__nsrf): z = zeta[isrf] - pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[ - :, :, : - ] + pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[:, :, :] for isrf in range(self.__nsrf): p = pct[isrf] if self.__istrat == 1: @@ -2022,9 +2013,7 @@ def shapefile_get_vertices(shp): return vertices -def shapefile_to_patch_collection( - shp: Union[str, os.PathLike], radius=500.0, idx=None -): +def shapefile_to_patch_collection(shp: Union[str, os.PathLike], radius=500.0, idx=None): """ Create a patch collection from the shapes in a shapefile @@ -2448,9 +2437,7 @@ def intersect_modpath_with_crosssection( oppts[cell], ) idx = [ - i - for i, (x, y) in enumerate(zip(m0[0], m1[0])) - if x == y == True + i for i, (x, y) in enumerate(zip(m0[0], m1[0])) if x == y == True ] else: idx = [i for i, x in enumerate(m0[0]) if x == True] @@ -2749,9 +2736,7 @@ def to_mp7_pathlines( # return early if already in MP7 format if "t" not in dt: - return ( - data if ret_type == pd.DataFrame else data.to_records(index=False) - ) + return data if ret_type == pd.DataFrame else data.to_records(index=False) # return early if empty if data.empty: @@ -2822,9 +2807,7 @@ def to_mp7_endpoints( # check format dt = data.dtypes if all(n in dt for n in MP7_ENDPOINT_DTYPE.names): - return ( - data if ret_type == pd.DataFrame else data.to_records(index=False) - ) + return data if ret_type == pd.DataFrame else data.to_records(index=False) if not ( all(n in dt for n in MIN_PARTICLE_TRACK_DTYPE.names) or all(n in dt for n in PRT_PATHLINE_DTYPE.names) @@ -2848,12 +2831,8 @@ def to_mp7_endpoints( data[seqn_key] = particles.ngroup() # select startpoints and endpoints, sorting by sequencenumber - startpts = ( - data.sort_values("t").groupby(seqn_key).head(1).sort_values(seqn_key) - ) - endpts = ( - data.sort_values("t").groupby(seqn_key).tail(1).sort_values(seqn_key) - ) + startpts = data.sort_values("t").groupby(seqn_key).head(1).sort_values(seqn_key) + endpts = data.sort_values("t").groupby(seqn_key).tail(1).sort_values(seqn_key) # add columns for pairings = [ @@ -2952,9 +2931,7 @@ def to_prt_pathlines( # return early if already in PRT format if "t" in dt: - return ( - data if ret_type == pd.DataFrame 
else data.to_records(index=False) - ) + return data if ret_type == pd.DataFrame else data.to_records(index=False) # return early if empty if data.empty: diff --git a/flopy/plot/styles.py b/flopy/plot/styles.py index 48e916f07f..25ffdc0cf1 100644 --- a/flopy/plot/styles.py +++ b/flopy/plot/styles.py @@ -95,9 +95,7 @@ def heading( if letter is None and idx is not None: letter = chr(ord("A") + idx) - font = styles.__set_fontspec( - bold=True, italic=False, fontsize=fontsize - ) + font = styles.__set_fontspec(bold=True, italic=False, fontsize=fontsize) if letter is not None: if heading is None: @@ -148,9 +146,7 @@ def xlabel(cls, ax=None, label="", bold=False, italic=False, **kwargs): if ax is None: ax = plt.gca() fontsize = kwargs.pop("fontsize", 9) - fontspec = styles.__set_fontspec( - bold=bold, italic=italic, fontsize=fontsize - ) + fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize) ax.set_xlabel(label, fontdict=fontspec, **kwargs) @classmethod @@ -178,9 +174,7 @@ def ylabel(cls, ax=None, label="", bold=False, italic=False, **kwargs): ax = plt.gca() fontsize = kwargs.pop("fontsize", 9) - fontspec = styles.__set_fontspec( - bold=bold, italic=italic, fontsize=fontsize - ) + fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize) ax.set_ylabel(label, fontdict=fontspec, **kwargs) @classmethod @@ -311,9 +305,7 @@ def add_text( else: transform = ax.transData - font = styles.__set_fontspec( - bold=bold, italic=italic, fontsize=fontsize - ) + font = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize) text_obj = ax.text( x, @@ -381,9 +373,7 @@ def add_annotation( if xytext is None: xytext = (0.0, 0.0) - fontspec = styles.__set_fontspec( - bold=bold, italic=italic, fontsize=fontsize - ) + fontspec = styles.__set_fontspec(bold=bold, italic=italic, fontsize=fontsize) # add font information to kwargs if kwargs is None: kwargs = fontspec diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index 7339b69914..80835e5224 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -141,9 +141,7 @@ def __init__( # the starting external data unit number self._next_ext_unit = 3000 if external_path is not None: - assert ( - model_ws == "." 
- ), "ERROR: external cannot be used with model_ws" + assert model_ws == ".", "ERROR: external cannot be used with model_ws" if os.path.exists(external_path): print(f"Note: external_path {external_path} already exists") @@ -295,13 +293,9 @@ def _set_name(self, value): def change_model_ws(self, new_pth=None, reset_external=False): # if hasattr(self,"_mf"): if self._mf is not None: - self._mf.change_model_ws( - new_pth=new_pth, reset_external=reset_external - ) + self._mf.change_model_ws(new_pth=new_pth, reset_external=reset_external) if self._mt is not None: - self._mt.change_model_ws( - new_pth=new_pth, reset_external=reset_external - ) + self._mt.change_model_ws(new_pth=new_pth, reset_external=reset_external) super().change_model_ws(new_pth=new_pth, reset_external=reset_external) def write_name_file(self): @@ -400,9 +394,7 @@ def write_name_file(self): f_nam.write(f"{tag:14s} {u:5d} {f}\n") # write the output files - for u, f, b in zip( - self.output_units, self.output_fnames, self.output_binflag - ): + for u, f, b in zip(self.output_units, self.output_fnames, self.output_binflag): if u == 0: continue if b: diff --git a/flopy/seawat/swtvdf.py b/flopy/seawat/swtvdf.py index 914fe92c4e..2ea71dfa1f 100644 --- a/flopy/seawat/swtvdf.py +++ b/flopy/seawat/swtvdf.py @@ -284,14 +284,11 @@ def write_file(self): elif self.mtdnconc == -1: f_vdf.write( - "%10.4f%10.4f%10.4f\n" - % (self.denseref, self.drhodprhd, self.prhdref) + "%10.4f%10.4f%10.4f\n" % (self.denseref, self.drhodprhd, self.prhdref) ) f_vdf.write("%10i\n" % self.nsrhoeos) if self.nsrhoeos == 1: - f_vdf.write( - "%10i%10.4f%10.4f\n" % (1, self.denseslp, self.crhoref) - ) + f_vdf.write("%10i%10.4f%10.4f\n" % (1, self.denseslp, self.crhoref)) else: for i in range(self.nsrhoeos): mtrhospec = 1 + i @@ -467,9 +464,7 @@ def load(cls, f, model, nper=None, ext_unit_dict=None): for iper in range(nper): if model.verbose: - print( - f" loading INDENSE for stress period {iper + 1}..." - ) + print(f" loading INDENSE for stress period {iper + 1}...") line = f.readline() t = line.strip().split() indense = int(t[0]) diff --git a/flopy/seawat/swtvsc.py b/flopy/seawat/swtvsc.py index 368bd1f6a6..3afe619f87 100644 --- a/flopy/seawat/swtvsc.py +++ b/flopy/seawat/swtvsc.py @@ -149,8 +149,7 @@ def __init__( ): if len(list(kwargs.keys())) > 0: raise Exception( - "VSC error: unrecognized kwargs: " - + " ".join(list(kwargs.keys())) + "VSC error: unrecognized kwargs: " + " ".join(list(kwargs.keys())) ) if unitnumber is None: diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index a729308ea1..63433dd90a 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -209,19 +209,13 @@ def set_values(self, **kwargs): try: self.header[0][k] = int(kwargs[k]) except: - print( - f"{k} key not available in {self.header_type} " - "header dtype" - ) + print(f"{k} key not available in {self.header_type} header dtype") for k in fkey: if k in kwargs.keys(): try: self.header[0][k] = float(kwargs[k]) except: - print( - f"{k} key not available " - f"in {self.header_type} header dtype" - ) + print(f"{k} key not available in {self.header_type} header dtype") for k in ckey: if k in kwargs.keys(): # Convert to upper case to be consistent case used by MODFLOW @@ -460,9 +454,7 @@ class BinaryLayerFile(LayerFile): pointing to the 1st byte of data for the corresponding data arrays. 
""" - def __init__( - self, filename: Union[str, os.PathLike], precision, verbose, **kwargs - ): + def __init__(self, filename: Union[str, os.PathLike], precision, verbose, **kwargs): super().__init__(filename, precision, verbose, **kwargs) def _build_index(self): @@ -590,9 +582,7 @@ def get_ts(self, idx): for k, i, j in kijlist: ioffset = (i * self.ncol + j) * self.realtype(1).nbytes for irec, header in enumerate(self.recordarray): - ilay = ( - header["ilay"] - 1 - ) # change ilay from header to zero-based + ilay = header["ilay"] - 1 # change ilay from header to zero-based if ilay != k: continue ipos = self.iposarray[irec].item() @@ -659,9 +649,7 @@ def __init__( s = f"Error. Precision could not be determined for {filename}" print(s) raise Exception() - self.header_dtype = BinaryHeader.set_dtype( - bintype="Head", precision=precision - ) + self.header_dtype = BinaryHeader.set_dtype(bintype="Head", precision=precision) super().__init__(filename, precision, verbose, **kwargs) def reverse(self, filename: Optional[os.PathLike] = None): @@ -690,10 +678,7 @@ def get_max_kper_kstp_tsim(): kstp = {0: 0} for i in range(len(self) - 1, -1, -1): header = self.recordarray[i] - if ( - header["kper"] in kstp - and header["kstp"] > kstp[header["kper"]] - ): + if header["kper"] in kstp and header["kstp"] > kstp[header["kper"]]: kstp[header["kper"]] += 1 else: kstp[header["kper"]] = 0 @@ -828,9 +813,7 @@ def __init__( s = f"Error. Precision could not be determined for {filename}" print(s) raise Exception() - self.header_dtype = BinaryHeader.set_dtype( - bintype="Ucn", precision=precision - ) + self.header_dtype = BinaryHeader.set_dtype(bintype="Ucn", precision=precision) super().__init__(filename, precision, verbose, **kwargs) return @@ -898,9 +881,7 @@ def __init__( s = f"Error. Precision could not be determined for {filename}" print(s) raise Exception() - self.header_dtype = BinaryHeader.set_dtype( - bintype="Head", precision=precision - ) + self.header_dtype = BinaryHeader.set_dtype(bintype="Head", precision=precision) super().__init__(filename, precision, verbose, **kwargs) def _get_data_array(self, totim=0.0): @@ -911,9 +892,7 @@ def _get_data_array(self, totim=0.0): """ if totim >= 0.0: - keyindices = np.asarray( - self.recordarray["totim"] == totim - ).nonzero()[0] + keyindices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." raise Exception(msg) @@ -1317,9 +1296,7 @@ def _build_index(self): ipos # store the position right after header2 ) self.recordarray.append(header) - self.iposarray.append( - ipos - ) # store the position right after header2 + self.iposarray.append(ipos) # store the position right after header2 # skip over the data to the next record and set ipos self._skip_record(header) @@ -1348,9 +1325,7 @@ def _build_index(self): dtype = self.header_dtype[name] if np.issubdtype(dtype, bytes): # convert to str self.headers[name] = ( - self.headers[name] - .str.decode("ascii", "strict") - .str.strip() + self.headers[name].str.decode("ascii", "strict").str.strip() ) def _skip_record(self, header): @@ -1476,8 +1451,7 @@ def _find_paknam(self, paknam, to=False): break if paknam16 is None: raise Exception( - "The specified package name string is not " - "in the budget file." + "The specified package name string is not in the budget file." 
) return paknam16 @@ -1630,9 +1604,7 @@ def get_indices(self, text=None): # check and make sure that text is in file if text is not None: text16 = self._find_text(text) - select_indices = np.asarray( - self.recordarray["text"] == text16 - ).nonzero() + select_indices = np.asarray(self.recordarray["text"] == text16).nonzero() if isinstance(select_indices, tuple): select_indices = select_indices[0] else: @@ -1755,22 +1727,14 @@ def get_data( if kstpkper is not None: kstp1 = kstpkper[0] + 1 kper1 = kstpkper[1] + 1 - select_indices = select_indices & ( - self.recordarray["kstp"] == kstp1 - ) - select_indices = select_indices & ( - self.recordarray["kper"] == kper1 - ) + select_indices = select_indices & (self.recordarray["kstp"] == kstp1) + select_indices = select_indices & (self.recordarray["kper"] == kper1) selected = True if text16 is not None: - select_indices = select_indices & ( - self.recordarray["text"] == text16 - ) + select_indices = select_indices & (self.recordarray["text"] == text16) selected = True if paknam16 is not None: - select_indices = select_indices & ( - self.recordarray["paknam"] == paknam16 - ) + select_indices = select_indices & (self.recordarray["paknam"] == paknam16) selected = True if paknam16_2 is not None: select_indices = select_indices & ( @@ -1832,8 +1796,7 @@ def get_ts(self, idx, text=None, times=None): # issue exception if text not provided if text is None: raise Exception( - "text keyword must be provided to CellBudgetFile " - "get_ts() method." + "text keyword must be provided to CellBudgetFile get_ts() method." ) kijlist = self._build_kijlist(idx) @@ -1883,8 +1846,7 @@ def get_ts(self, idx, text=None, times=None): if self.modelgrid.grid_type == "structured": ndx = [ - lrc[0] - * (self.modelgrid.nrow * self.modelgrid.ncol) + lrc[0] * (self.modelgrid.nrow * self.modelgrid.ncol) + lrc[1] * self.modelgrid.ncol + (lrc[2] + 1) for lrc in kijlist @@ -1923,8 +1885,9 @@ def _build_kijlist(self, idx): fail = True if fail: raise Exception( - "Invalid cell index. Cell {} not within model grid: " - "{}".format((k, i, j), (self.nlay, self.nrow, self.ncol)) + "Invalid cell index. 
Cell {} not within model grid: {}".format( + (k, i, j), (self.nlay, self.nrow, self.ncol) + ) ) return kijlist @@ -1936,9 +1899,7 @@ def _get_nstation(self, idx, kijlist): def _init_result(self, nstation): # Initialize result array and put times in first column - result = np.empty( - (len(self.kstpkper), nstation + 1), dtype=self.realtype - ) + result = np.empty((len(self.kstpkper), nstation + 1), dtype=self.realtype) result[:, :] = np.nan if len(self.times) == result.shape[0]: result[:, 0] = np.array(self.times) @@ -1998,17 +1959,13 @@ def get_record(self, idx, full3D=False): if self.verbose: s += f"an array of shape {(nlay, nrow, ncol)}" print(s) - return binaryread( - self.file, self.realtype(1), shape=(nlay, nrow, ncol) - ) + return binaryread(self.file, self.realtype(1), shape=(nlay, nrow, ncol)) # imeth 1 elif imeth == 1: if self.verbose: s += f"an array of shape {(nlay, nrow, ncol)}" print(s) - return binaryread( - self.file, self.realtype(1), shape=(nlay, nrow, ncol) - ) + return binaryread(self.file, self.realtype(1), shape=(nlay, nrow, ncol)) # imeth 2 elif imeth == 2: @@ -2016,10 +1973,7 @@ def get_record(self, idx, full3D=False): dtype = np.dtype([("node", np.int32), ("q", self.realtype)]) if self.verbose: if full3D: - s += ( - f"a numpy masked array of " - f"size ({nlay}, {nrow}, {ncol})" - ) + s += f"a numpy masked array of size ({nlay}, {nrow}, {ncol})" else: s += f"a numpy recarray of size ({nlist}, 2)" print(s) @@ -2035,10 +1989,7 @@ def get_record(self, idx, full3D=False): data = binaryread(self.file, self.realtype(1), shape=(nrow, ncol)) if self.verbose: if full3D: - s += ( - "a numpy masked array of size " - f"({nlay}, {nrow}, {ncol})" - ) + s += f"a numpy masked array of size ({nlay}, {nrow}, {ncol})" else: s += ( "a list of two 2D numpy arrays. 
The first is an " @@ -2204,9 +2155,7 @@ def get_residual(self, totim, scaled=False): residual = np.zeros((nlay, nrow, ncol), dtype=float) if scaled: inflow = np.zeros((nlay, nrow, ncol), dtype=float) - select_indices = np.asarray( - self.recordarray["totim"] == totim - ).nonzero()[0] + select_indices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0] for i in select_indices: text = self.recordarray[i]["text"].decode() @@ -2311,9 +2260,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): # make sure we have tdis if self.tdis is None or not any(self.tdis.perioddata.get_data()): - raise ValueError( - "tdis must be known to reverse a cell budget file" - ) + raise ValueError("tdis must be known to reverse a cell budget file") # extract perioddata pd = self.tdis.perioddata.get_data() @@ -2400,13 +2347,9 @@ def reverse(self, filename: Optional[os.PathLike] = None): # Write auxiliary column names naux = ndat - 1 if naux > 0: - auxtxt = [ - "{:16}".format(colname) for colname in colnames[3:] - ] + auxtxt = ["{:16}".format(colname) for colname in colnames[3:]] auxtxt = tuple(auxtxt) - dt = np.dtype( - [(colname, "S16") for colname in colnames[3:]] - ) + dt = np.dtype([(colname, "S16") for colname in colnames[3:]]) h = np.array(auxtxt, dtype=dt) h.tofile(f) # Write nlist diff --git a/flopy/utils/check.py b/flopy/utils/check.py index 0552baeded..22868ad11f 100644 --- a/flopy/utils/check.py +++ b/flopy/utils/check.py @@ -149,9 +149,7 @@ def _add_to_summary( col_list += [k, i, j] if self.structured else [node] col_list += [value, desc] sa = self._get_summary_array(np.array(col_list)) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) def _boolean_compare( self, @@ -229,9 +227,7 @@ def _boolean_compare( cols = [ c for c in failed_info.dtype.names - if failed_info[c].sum() != 0 - and c != "diff" - and "tmp" not in c + if failed_info[c].sum() != 0 and c != "diff" and "tmp" not in c ] # currently failed_info[cols] results in a warning. Not sure # how to do this properly with a recarray. @@ -256,9 +252,7 @@ def _get_summary_array(self, array=None): ra = recarray(array, dtype) return ra - def _txt_footer( - self, headertxt, txt, testname, passed=False, warning=True - ): + def _txt_footer(self, headertxt, txt, testname, passed=False, warning=True): """ if len(txt) == 0 or passed: txt += 'passed.' 
@@ -286,9 +280,7 @@ def _stress_period_data_valid_indices(self, stress_period_data): error_name="invalid BC index", error_type="Error", ) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) spd_inds_valid = False self.remove_passed("BC indices valid") if spd_inds_valid: @@ -313,9 +305,7 @@ def _stress_period_data_nans(self, stress_period_data, nan_excl_list): error_name="Not a number", error_type="Error", ) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) self.remove_passed("not a number (Nan) entries") else: self.append_passed("not a number (Nan) entries") @@ -337,9 +327,7 @@ def _stress_period_data_inactivecells(self, stress_period_data): error_name=msg, error_type="Warning", ) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) self.remove_passed(f"{msg}s") else: self.append_passed(f"{msg}s") @@ -453,9 +441,7 @@ def get_active(self, include_cbd=False): # make ibound of same shape as thicknesses/botm for quasi-3D models active = self.model.bas6.ibound.array != 0 if include_cbd and dis.laycbd.sum() > 0: - laycbd = np.flatnonzero( - dis.laycbd.array > 0 - ) # cbd layer index + laycbd = np.flatnonzero(dis.laycbd.array > 0) # cbd layer index active = np.insert(active, laycbd, active[laycbd], axis=0) else: # if bas package is missing @@ -493,9 +479,7 @@ def stress_period_data_values( error_name=error_name, error_type=error_type, ) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) self.remove_passed(error_name) else: self.append_passed(error_name) @@ -517,14 +501,10 @@ def values(self, a, criteria, error_name="", error_type="Warning"): # but indsT will only have two columns if a 2-D array is being compared # pad indsT with a column of zeros for k if indsT.shape[1] == 2: - indsT = np.column_stack( - [np.zeros(indsT.shape[0], dtype=int), indsT] - ) + indsT = np.column_stack([np.zeros(indsT.shape[0], dtype=int), indsT]) sa = np.column_stack([tp, pn, indsT, v, en]) sa = self._get_summary_array(sa) - self.summary_array = np.append(self.summary_array, sa).view( - np.recarray - ) + self.summary_array = np.append(self.summary_array, sa).view(np.recarray) self.remove_passed(error_name) else: self.append_passed(error_name) @@ -573,11 +553,7 @@ def summarize(self, scrub: bool = False): if txt == "": txt += " No errors or warnings encountered.\n" - elif ( - self.f is not None - and self.verbose - and self.summary_array.shape[0] > 0 - ): + elif self.f is not None and self.verbose and self.summary_array.shape[0] > 0: txt += f" see {relpath_safe(self.summaryfile, scrub=scrub)} for details.\n" # print checks that passed for higher levels @@ -613,8 +589,7 @@ def _has_cell_indices(self, stress_period_data): ) != {"k", "i", "j"}: self._add_to_summary( type="Error", - desc="\r Stress period data missing k, " - "i, j for structured grid.", + desc="\r Stress period data missing k, i, j for structured grid.", ) return False elif ( @@ -706,9 +681,7 @@ def get_neighbors(self, a): if isinstance(a, Util3d): a = a.array pad_value = int(-1e9) - n_max = ( - np.max(disu.iac.array) - 1 - ) # -1 for self, removed below + n_max = np.max(disu.iac.array) - 1 # -1 for self, removed below arr_neighbors = [ np.pad( a[n 
- 1], @@ -718,9 +691,7 @@ def get_neighbors(self, a): ) for n in neighbors ] - arr_neighbors = np.where( - arr_neighbors == -1e9, np.nan, arr_neighbors - ) + arr_neighbors = np.where(arr_neighbors == -1e9, np.nan, arr_neighbors) neighbors = arr_neighbors.T else: # if no disu, we can't define neighbours for this ugrid @@ -745,9 +716,7 @@ def _fmt_string_list(array, float_format="{}"): "recarray to file - change to 'object' type" ) else: - raise Exception( - f"MfList.fmt_string error: unknown vtype in dtype:{vtype}" - ) + raise Exception(f"MfList.fmt_string error: unknown vtype in dtype:{vtype}") return fmt_string @@ -842,9 +811,7 @@ def _get_cell_inds(self, spd): for item in zip(*cellid): hnames += ( - np.ndarray( - shape=(len(item),), buffer=np.array(item), dtype=np.int32 - ), + np.ndarray(shape=(len(item),), buffer=np.array(item), dtype=np.int32), ) return hnames diff --git a/flopy/utils/compare.py b/flopy/utils/compare.py index a69538a430..89c10619ed 100644 --- a/flopy/utils/compare.py +++ b/flopy/utils/compare.py @@ -84,12 +84,8 @@ def compare_budget( max_cumpd=0.01, max_incpd=0.01, outfile: Optional[Union[str, os.PathLike]] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, ): """Compare the budget results from two simulations. @@ -218,10 +214,7 @@ def compare_budget( maxcolname = max(maxcolname, len(colname)) s = 2 * "\n" - s += ( - f"STRESS PERIOD: {kper[jdx] + 1} " - + f"TIME STEP: {kstp[jdx] + 1}" - ) + s += f"STRESS PERIOD: {kper[jdx] + 1} " + f"TIME STEP: {kstp[jdx] + 1}" f.write(s) if idx == 0: @@ -291,12 +284,8 @@ def compare_swrbudget( max_cumpd=0.01, max_incpd=0.01, outfile: Optional[Union[str, os.PathLike]] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, ): """Compare the SWR budget results from two simulations. 
@@ -418,10 +407,7 @@ def compare_swrbudget( maxcolname = max(maxcolname, len(colname)) s = 2 * "\n" - s += ( - f"STRESS PERIOD: {kper[jdx] + 1} " - + f"TIME STEP: {kstp[jdx] + 1}" - ) + s += f"STRESS PERIOD: {kper[jdx] + 1} " + f"TIME STEP: {kstp[jdx] + 1}" f.write(s) if idx == 0: @@ -492,12 +478,8 @@ def compare_heads( text2=None, htol=0.001, outfile: Optional[Union[str, os.PathLike]] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, difftol=False, verbose=False, exfile: Optional[Union[str, os.PathLike]] = None, @@ -687,10 +669,7 @@ def compare_heads( try: exd = np.genfromtxt(exfile).flatten() except: - e = ( - "Could not read exclusion " - + f"file {os.path.basename(exfile)}" - ) + e = "Could not read exclusion " + f"file {os.path.basename(exfile)}" print(e) return False else: @@ -715,9 +694,7 @@ def compare_heads( status1 = status1.upper() unstructured1 = False if status1 == dbs: - headobj1 = HeadFile( - hfpth1, precision=precision, verbose=verbose, text=text - ) + headobj1 = HeadFile(hfpth1, precision=precision, verbose=verbose, text=text) txt = headobj1.recordarray["text"][0] if isinstance(txt, bytes): txt = txt.decode("utf-8") @@ -730,9 +707,7 @@ def compare_heads( status2 = status2.upper() unstructured2 = False if status2 == dbs: - headobj2 = HeadFile( - hfpth2, precision=precision, verbose=verbose, text=text2 - ) + headobj2 = HeadFile(hfpth2, precision=precision, verbose=verbose, text=text2) txt = headobj2.recordarray["text"][0] if isinstance(txt, bytes): txt = txt.decode("utf-8") @@ -883,12 +858,8 @@ def compare_concentrations( precision="auto", ctol=0.001, outfile: Optional[Union[str, os.PathLike]] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, difftol=False, verbose=False, ): @@ -1114,12 +1085,8 @@ def compare_concentrations( def compare_stages( namefile1: Union[str, os.PathLike] = None, namefile2: Union[str, os.PathLike] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, htol=0.001, outfile: Optional[Union[str, os.PathLike]] = None, difftol=False, @@ -1336,12 +1303,8 @@ def compare( htol=0.001, outfile1: Optional[Union[str, os.PathLike]] = None, outfile2: Optional[Union[str, os.PathLike]] = None, - files1: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, - files2: Optional[ - Union[str, os.PathLike, list[Union[str, os.PathLike]]] - ] = None, + files1: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, + files2: Optional[Union[str, os.PathLike, list[Union[str, os.PathLike]]]] = None, ): """Compare the budget and head results for two MODFLOW-based model simulations. 
diff --git a/flopy/utils/cvfdutil.py b/flopy/utils/cvfdutil.py index 3a59031d2b..c8d0b19894 100644 --- a/flopy/utils/cvfdutil.py +++ b/flopy/utils/cvfdutil.py @@ -360,9 +360,7 @@ def get_disv_gridprops(verts, iverts, xcyc=None): if xcyc is None: xcyc = np.empty((ncpl, 2), dtype=float) for icell in range(ncpl): - vlist = [ - (verts[ivert, 0], verts[ivert, 1]) for ivert in iverts[icell] - ] + vlist = [(verts[ivert, 0], verts[ivert, 1]) for ivert in iverts[icell]] xcyc[icell, 0], xcyc[icell, 1] = centroid_of_polygon(vlist) else: assert xcyc.shape == (ncpl, 2) @@ -372,8 +370,7 @@ cell2d = [] for i in range(ncpl): cell2d.append( - [i, xcyc[i, 0], xcyc[i, 1], len(iverts[i])] - + [iv for iv in iverts[i]] + [i, xcyc[i, 0], xcyc[i, 1], len(iverts[i])] + [iv for iv in iverts[i]] ) gridprops = {} gridprops["ncpl"] = ncpl
diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 90c0cd37e4..f5a52049a1 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -129,8 +129,9 @@ def __init__(self, filetype=None, precision="single"): self.dtype = None self.header = None print( - "Specified {} type is not available. " - "Available types are:".format(self.header_type) + "Specified {} type is not available. Available types are:".format( + self.header_type + ) ) for idx, t in enumerate(self.header_types): print(f" {idx + 1} {t}") @@ -165,9 +166,7 @@ class LayerFile: """ - def __init__( - self, filename: Union[str, os.PathLike], precision, verbose, **kwargs - ): + def __init__(self, filename: Union[str, os.PathLike], precision, verbose, **kwargs): from ..discretization.structuredgrid import StructuredGrid self.filename = Path(filename).expanduser().absolute() @@ -292,9 +291,7 @@ def to_shapefile( """ plotarray = np.atleast_3d( - self.get_data( - kstpkper=kstpkper, totim=totim, mflay=mflay - ).transpose() + self.get_data(kstpkper=kstpkper, totim=totim, mflay=mflay).transpose() ).transpose() if mflay is not None: attrib_dict = {f"{attrib_name}{mflay}": plotarray[0, :, :]} @@ -403,15 +400,11 @@ def plot( else: i0 = 0 i1 = self.nlay - filenames = [ - f"{filename_base}_Layer{k + 1}.{fext}" for k in range(i0, i1) - ] + filenames = [f"{filename_base}_Layer{k + 1}.{fext}" for k in range(i0, i1)] # make sure we have a (lay,row,col) shape plotarray plotarray = np.atleast_3d( - self.get_data( - kstpkper=kstpkper, totim=totim, mflay=mflay - ).transpose() + self.get_data(kstpkper=kstpkper, totim=totim, mflay=mflay).transpose() ).transpose() from ..plot.plotutil import PlotUtilities @@ -472,9 +465,7 @@ def _get_data_array(self, totim=0): """ if totim >= 0.0: - keyindices = np.asarray( - self.recordarray["totim"] == totim - ).nonzero()[0] + keyindices = np.asarray(self.recordarray["totim"] == totim).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." raise Exception(msg) @@ -561,9 +552,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): & (self.recordarray["kper"] == kper1) ).nonzero() if idx[0].shape[0] == 0: - raise Exception( - f"get_data() error: kstpkper not found:{kstpkper}" - ) + raise Exception(f"get_data() error: kstpkper not found:{kstpkper}") totim1 = self.recordarray[idx]["totim"][0] elif totim is not None: totim1 = totim @@ -646,8 +635,9 @@ def _build_kijlist(self, idx): fail = True if fail: raise Exception( - "Invalid cell index. Cell {} not within model grid: " - "{}".format((k, i, j), (self.nlay, self.nrow, self.ncol)) + "Invalid cell index. Cell {} not within model grid: {}".format( + (k, i, j), (self.nlay, self.nrow, self.ncol) + ) ) return kijlist
diff --git a/flopy/utils/datautil.py b/flopy/utils/datautil.py index 0fba1bdb4b..011a4323df 100644 --- a/flopy/utils/datautil.py +++ b/flopy/utils/datautil.py @@ -5,10 +5,7 @@ def clean_filename(file_name): - if ( - file_name[0] in PyListUtil.quote_list - and file_name[-1] in PyListUtil.quote_list - ): + if file_name[0] in PyListUtil.quote_list and file_name[-1] in PyListUtil.quote_list: # quoted string # keep entire string and remove the quotes f_name = file_name.strip('"') @@ -83,11 +80,7 @@ def is_float(v): @staticmethod def is_basic_type(obj): - if ( - isinstance(obj, str) - or isinstance(obj, int) - or isinstance(obj, float) - ): + if isinstance(obj, str) or isinstance(obj, int) or isinstance(obj, float): return True return False @@ -103,9 +96,9 @@ def cellid_model_num(data_item_name, model_data, model_dim): model_num = data_item_name[7:] if DatumUtil.is_int(model_num): return int(model_num) - 1 - if ( - data_item_name == "cellidn" or data_item_name == "cellidsj" - ) and len(model_dim) > 0: + if (data_item_name == "cellidn" or data_item_name == "cellidsj") and len( + model_dim + ) > 0: return 0 elif data_item_name == "cellidm" and len(model_dim) > 1: return 1 @@ -192,8 +185,7 @@ def has_one_item(current_list): if len(current_list) != 1: return False if ( - isinstance(current_list[0], list) - or isinstance(current_list, np.ndarray) + isinstance(current_list[0], list) or isinstance(current_list, np.ndarray) ) and len(current_list[0] != 0): return False return True @@ -246,9 +238,7 @@ def first_item(current_list): return item @staticmethod - def next_item( - current_list, new_list=True, nesting_change=0, end_of_list=True - ): + def next_item(current_list, new_list=True, nesting_change=0, end_of_list=True): # returns the next item in a nested list along with other information: # (, , , # @@ -259,9 +249,7 @@ def next_item( else: list_size = 1 for item in current_list: - if isinstance(item, list) or isinstance( - current_list, np.ndarray - ): + if isinstance(item, list) or isinstance(current_list, np.ndarray): # still in a list of lists, recurse for item in PyListUtil.next_item( item, @@ -317,10 +305,7 @@ def reset_delimiter_used(): @staticmethod def split_data_line(line, external_file=False, delimiter_conf_length=15): - if ( - PyListUtil.line_num > delimiter_conf_length - and PyListUtil.consistent_delim - ): + if PyListUtil.line_num > delimiter_conf_length and PyListUtil.consistent_delim: # consistent delimiter has been found. continue using that # delimiter without doing further checks if PyListUtil.delimiter_used is None: @@ -328,9 +313,7 @@ comment_split = line.split("#", 1) clean_line = comment_split[0].strip().split() else: comment_split = line.split("#", 1) - clean_line = ( - comment_split[0].strip().split(PyListUtil.delimiter_used) - ) + clean_line = comment_split[0].strip().split(PyListUtil.delimiter_used) if len(comment_split) > 1: clean_line.append("#") clean_line.append(comment_split[1].strip()) @@ -525,8 +508,7 @@ def __init__(self, mdlist=None, shape=None, callback=None): self.build_list(callback) else: raise Exception( - "MultiList requires either a mdlist or a shape " - "at initialization." + "MultiList requires either a mdlist or a shape at initialization." 
             )
 
     def __getitem__(self, k):
@@ -568,21 +550,15 @@ def increment_dimension(self, dimension, callback):
             new_row_idx = len(self.multi_dim_list)
             self.multi_dim_list.append([])
             for index in range(0, self.list_shape[1]):
-                self.multi_dim_list[-1].append(
-                    callback((new_row_idx, index))
-                )
+                self.multi_dim_list[-1].append(callback((new_row_idx, index)))
             self.list_shape = (self.list_shape[0] + 1, self.list_shape[1])
         elif dimension == 2:
             new_col_idx = len(self.multi_dim_list[0])
             for index in range(0, self.list_shape[0]):
-                self.multi_dim_list[index].append(
-                    callback((index, new_col_idx))
-                )
+                self.multi_dim_list[index].append(callback((index, new_col_idx)))
             self.list_shape = (self.list_shape[0], self.list_shape[1] + 1)
         else:
-            raise Exception(
-                'For two dimensional lists "dimension" must ' "be 1 or 2."
-            )
+            raise Exception('For two dimensional lists "dimension" must be 1 or 2.')
 
     def build_list(self, callback):
         entry_points = [(self.multi_dim_list, self.first_index())]
@@ -602,9 +578,7 @@ def build_list(self, callback):
                         new_location = (len(entry_point) - 1,)
                     else:
                         new_location = ((len(entry_point[0]) - 1), val)
-                    new_entry_points.append(
-                        (entry_point[0][-1], new_location)
-                    )
+                    new_entry_points.append((entry_point[0][-1], new_location))
                 else:
                     entry_point[0].append(
                         callback(tuple(i + val for i in entry_point[1]))
diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py
index 403f3e7a97..88e5a2f255 100644
--- a/flopy/utils/flopy_io.py
+++ b/flopy/utils/flopy_io.py
@@ -45,9 +45,7 @@ def _fmt_string(array, float_format="{}"):
                 "recarray to file - change to 'object' type"
             )
         else:
-            raise Exception(
-                f"MfList.fmt_string error: unknown vtype in dtype:{vtype}"
-            )
+            raise Exception(f"MfList.fmt_string error: unknown vtype in dtype:{vtype}")
     return fmt_string
 
 
@@ -324,9 +322,7 @@ def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False):
     return wel
 
 
-def loadtxt(
-    file, delimiter=" ", dtype=None, skiprows=0, use_pandas=True, **kwargs
-):
+def loadtxt(file, delimiter=" ", dtype=None, skiprows=0, use_pandas=True, **kwargs):
     """
     Use pandas to load a text file (significantly faster than n.loadtxt
     or genfromtxt see
diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py
index 8d87cf6464..9f3074bfb4 100644
--- a/flopy/utils/formattedfile.py
+++ b/flopy/utils/formattedfile.py
@@ -65,10 +65,7 @@ def read_header(self, text_file):
         arrheader = header_text.split()
 
         # Verify header exists and is in the expected format
-        if (
-            len(arrheader) >= 5
-            and arrheader[4].upper() != self.text_ident.upper()
-        ):
+        if len(arrheader) >= 5 and arrheader[4].upper() != self.text_ident.upper():
             raise Exception(
                 "Expected header not found. Make sure the file being "
                 "processed includes headers (LABEL output control option): "
@@ -84,9 +81,7 @@ def read_header(self, text_file):
             or not is_int(arrheader[6])
             or not is_int(arrheader[7])
         ):
-            raise Exception(
-                f"Unexpected format for FHDTextHeader: {header_text}"
-            )
+            raise Exception(f"Unexpected format for FHDTextHeader: {header_text}")
 
         headerinfo = np.empty([8], dtype=self.dtype)
         headerinfo["kstp"] = int(arrheader[0])
@@ -159,9 +154,7 @@ def _build_index(self):
 
         # provide headers as a pandas frame
         self.headers = pd.DataFrame(self.recordarray, index=self.iposarray)
-        self.headers["text"] = self.headers["text"].str.decode(
-            "ascii", "strict"
-        )
+        self.headers["text"] = self.headers["text"].str.decode("ascii", "strict")
 
     def _store_record(self, header, ipos):
         """
diff --git a/flopy/utils/geometry.py b/flopy/utils/geometry.py
index f99d8de9ad..55ee55a6e2 100644
--- a/flopy/utils/geometry.py
+++ b/flopy/utils/geometry.py
@@ -74,9 +74,7 @@ def __geo_interface__(self):
 
         if self.__type == "Polygon":
             geo_interface = {
-                "coordinates": tuple(
-                    [self.exterior] + [i for i in self.interiors]
-                ),
+                "coordinates": tuple([self.exterior] + [i for i in self.interiors]),
                 "type": self.__type,
             }
 
@@ -135,9 +133,7 @@ def from_geojson(geo_interface):
             shape = LineString(geo_interface["coordinates"])
 
         elif geo_interface["type"] == "MultiLineString":
-            geoms = [
-                LineString(coords) for coords in geo_interface["coordinates"]
-            ]
+            geoms = [LineString(coords) for coords in geo_interface["coordinates"]]
             shape = MultiLineString(geoms)
 
         elif geo_interface["type"] == "Point":
@@ -663,14 +659,10 @@ def rotate(x, y, xoff, yoff, angrot_radians):
         y = np.array(y)
 
     xrot = (
-        xoff
-        + np.cos(angrot_radians) * (x - xoff)
-        - np.sin(angrot_radians) * (y - yoff)
+        xoff + np.cos(angrot_radians) * (x - xoff) - np.sin(angrot_radians) * (y - yoff)
     )
     yrot = (
-        yoff
-        + np.sin(angrot_radians) * (x - xoff)
-        + np.cos(angrot_radians) * (y - yoff)
+        yoff + np.sin(angrot_radians) * (x - xoff) + np.cos(angrot_radians) * (y - yoff)
     )
 
     return xrot, yrot
@@ -868,9 +860,9 @@ def point_in_polygon(xc, yc, polygon):
     num = len(polygon)
     j = num - 1
     for i in range(num):
-        tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * (
-            yc - polygon[i][1]
-        ) / (polygon[j][1] - polygon[i][1])
+        tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * (yc - polygon[i][1]) / (
+            polygon[j][1] - polygon[i][1]
+        )
 
         comp = np.asarray(
             ((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) & (xc < tmp)
diff --git a/flopy/utils/geospatial_utils.py b/flopy/utils/geospatial_utils.py
index 6101f951c6..80b02644a2 100644
--- a/flopy/utils/geospatial_utils.py
+++ b/flopy/utils/geospatial_utils.py
@@ -49,9 +49,7 @@ class GeoSpatialUtil:
     """
 
     def __init__(self, obj, shapetype=None):
-        self.__shapefile = import_optional_dependency(
-            "shapefile", errors="silent"
-        )
+        self.__shapefile = import_optional_dependency("shapefile", errors="silent")
         self.__obj = obj
         self.__geo_interface = {}
         self._geojson = None
@@ -212,9 +210,7 @@ def shape(self):
         """
         if self.__shapefile is not None:
             if self._shape is None:
-                self._shape = self.__shapefile.Shape._from_geojson(
-                    self.__geo_interface
-                )
+                self._shape = self.__shapefile.Shape._from_geojson(self.__geo_interface)
         return self._shape
 
     @property
@@ -260,14 +256,10 @@ class GeoSpatialCollection:
     """
 
     def __init__(self, obj, shapetype=None):
-        self.__shapefile = import_optional_dependency(
-            "shapefile", errors="silent"
-        )
+        self.__shapefile = import_optional_dependency("shapefile", errors="silent")
         gpd = import_optional_dependency("geopandas", errors="silent")
-        shapely_geo = import_optional_dependency(
-            "shapely.geometry", errors="silent"
-        )
+        shapely_geo = import_optional_dependency("shapely.geometry", errors="silent")
 
         self.__obj = obj
         self.__collection = []
@@ -317,9 +309,7 @@ def __init__(self, obj, shapetype=None):
                     shapetype = [shapetype] * len(obj)
 
                 for ix, geom in enumerate(obj):
-                    self.__collection.append(
-                        GeoSpatialUtil(geom, shapetype[ix])
-                    )
+                    self.__collection.append(GeoSpatialUtil(geom, shapetype[ix]))
 
         elif self.__shapefile is not None:
             if isinstance(obj, (str, os.PathLike)):
diff --git a/flopy/utils/get_modflow.py b/flopy/utils/get_modflow.py
index 31905a85ab..e186b1e4f8 100755
--- a/flopy/utils/get_modflow.py
+++ b/flopy/utils/get_modflow.py
@@ -105,9 +105,7 @@ def get_request(url, params={}):
     return urllib.request.Request(url, headers=headers)
 
 
-def get_releases(
-    owner=None, repo=None, quiet=False, per_page=None
-) -> List[str]:
+def get_releases(owner=None, repo=None, quiet=False, per_page=None) -> List[str]:
     """Get list of available releases."""
     owner = default_owner if owner is None else owner
     repo = default_repo if repo is None else repo
@@ -215,9 +213,7 @@ def columns_str(items, line_chars=79) -> str:
     lines = []
     for row_num in range(num_rows):
         row_items = items[row_num::num_rows]
-        lines.append(
-            " ".join(item.ljust(item_chars) for item in row_items).rstrip()
-        )
+        lines.append(" ".join(item.ljust(item_chars) for item in row_items).rstrip())
     return "\n".join(lines)
 
 
@@ -230,9 +226,7 @@ def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:
     if within_flopy:  # don't check is_dir() or access yet
         options[":flopy"] = (flopy_appdata_path / "bin", "used by FloPy")
     # Python bin (same for standard or conda varieties)
-    py_bin = Path(sys.prefix) / (
-        "Scripts" if get_ostag().startswith("win") else "bin"
-    )
+    py_bin = Path(sys.prefix) / ("Scripts" if get_ostag().startswith("win") else "bin")
     if py_bin.is_dir() and os.access(py_bin, os.W_OK):
         options[":python"] = (py_bin, "used by Python")
     home_local_bin = Path.home() / ".local" / "bin"
@@ -242,9 +236,7 @@ def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:
     if local_bin.is_dir() and os.access(local_bin, os.W_OK):
         options[":system"] = (local_bin, "system local bindir")
     # Windows user
-    windowsapps_dir = Path(
-        os.path.expandvars(r"%LOCALAPPDATA%\Microsoft\WindowsApps")
-    )
+    windowsapps_dir = Path(os.path.expandvars(r"%LOCALAPPDATA%\Microsoft\WindowsApps"))
     if windowsapps_dir.is_dir() and os.access(windowsapps_dir, os.W_OK):
         options[":windowsapps"] = (windowsapps_dir, "User App path")
 
@@ -264,20 +256,16 @@ def select_bindir(bindir, previous=None, quiet=False, is_cli=False) -> Path:
         sel = list(opt for opt in options if opt.startswith(bindir.lower()))
         if len(sel) != 1:
             opt_avail = ", ".join(
-                f"'{opt}' for '{optpath}'"
-                for opt, (optpath, _) in options.items()
-            )
-            raise ValueError(
-                f"invalid option '{bindir}', choose from: {opt_avail}"
+                f"'{opt}' for '{optpath}'" for opt, (optpath, _) in options.items()
             )
+            raise ValueError(f"invalid option '{bindir}', choose from: {opt_avail}")
         if not quiet:
             print(f"auto-selecting option {sel[0]!r} for 'bindir'")
         return Path(options[sel[0]][0]).resolve()
     else:
         if not is_cli:
             opt_avail = ", ".join(
-                f"'{opt}' for '{optpath}'"
-                for opt, (optpath, _) in options.items()
+                f"'{opt}' for '{optpath}'" for opt, (optpath, _) in options.items()
             )
             raise ValueError(f"specify the option, choose from: {opt_avail}")
 
@@ -298,9 +286,7 @@ def select_bindir(bindir, previous=None, quiet=False, is_cli=False) -> Path:
             if num_tries < 2:
                 print("invalid option, try choosing option again")
             else:
-                raise RuntimeError(
-                    "invalid option, too many attempts"
-                ) from None
+                raise RuntimeError("invalid option, too many attempts") from None
 
 
 def run_main(
@@ -415,9 +401,7 @@ def run_main(
 
     # make sure repo option is valid
     if repo not in available_repos:
-        raise KeyError(
-            f"repo {repo!r} not supported; choose one of {available_repos}"
-        )
+        raise KeyError(f"repo {repo!r} not supported; choose one of {available_repos}")
 
     # get the selected release
     release = get_release(owner, repo, release_id, quiet)
@@ -438,9 +422,7 @@ def run_main(
         dst_fname = "-".join([repo, release["tag_name"], ostag]) + asset_suffix
     else:
         # change local download name so it is more unique
-        dst_fname = "-".join(
-            [renamed_prefix[repo], release["tag_name"], asset_name]
-        )
+        dst_fname = "-".join([renamed_prefix[repo], release["tag_name"], asset_name])
     tmpdir = None
     if downloads_dir is None:
         downloads_dir = Path.home() / "Downloads"
@@ -450,13 +432,9 @@ def run_main(
     else:  # check user-defined
         downloads_dir = Path(downloads_dir)
         if not downloads_dir.is_dir():
-            raise OSError(
-                f"downloads directory '{downloads_dir}' does not exist"
-            )
+            raise OSError(f"downloads directory '{downloads_dir}' does not exist")
         elif not os.access(downloads_dir, os.W_OK):
-            raise OSError(
-                f"downloads directory '{downloads_dir}' is not writable"
-            )
+            raise OSError(f"downloads directory '{downloads_dir}' is not writable")
     download_pth = downloads_dir / dst_fname
     if download_pth.is_file() and not force:
         if not quiet:
@@ -551,25 +529,18 @@ def add_item(key, fname, do_chmod):
     for key in sorted(code):
         if code[key].get("shared_object"):
             fname = f"{key}{lib_suffix}"
-            if nosub or (
-                subset and (key in subset or fname in subset)
-            ):
+            if nosub or (subset and (key in subset or fname in subset)):
                 add_item(key, fname, do_chmod=False)
         else:
            fname = f"{key}{exe_suffix}"
-            if nosub or (
-                subset and (key in subset or fname in subset)
-            ):
+            if nosub or (subset and (key in subset or fname in subset)):
                 add_item(key, fname, do_chmod=True)
             # check if double version exists
             fname = f"{key}dbl{exe_suffix}"
             if (
                 code[key].get("double_switch", True)
                 and fname in files
-                and (
-                    nosub
-                    or (subset and (key in subset or fname in subset))
-                )
+                and (nosub or (subset and (key in subset or fname in subset)))
             ):
                 add_item(key, fname, do_chmod=True)
 
@@ -745,9 +716,7 @@ def cli_main():
         help="Force re-download archive. Default behavior will use archive if "
         "previously downloaded in downloads-dir.",
     )
-    parser.add_argument(
-        "--quiet", action="store_true", help="Show fewer messages."
-    )
+    parser.add_argument("--quiet", action="store_true", help="Show fewer messages.")
     args = vars(parser.parse_args())
     try:
         run_main(**args, _is_cli=True)
diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py
index 4b7b15a913..2bed71fc65 100644
--- a/flopy/utils/gridgen.py
+++ b/flopy/utils/gridgen.py
@@ -38,9 +38,7 @@ def read1d(f, a):
     return a
 
 
-def features_to_shapefile(
-    features, featuretype, filename: Union[str, os.PathLike]
-):
+def features_to_shapefile(features, featuretype, filename: Union[str, os.PathLike]):
     """
     Write a shapefile for the features of type featuretype.
@@ -106,9 +104,7 @@ def features_to_shapefile(
     wr.close()
 
 
-def ndarray_to_asciigrid(
-    fname: Union[str, os.PathLike], a, extent, nodata=1.0e30
-):
+def ndarray_to_asciigrid(fname: Union[str, os.PathLike], a, extent, nodata=1.0e30):
     # extent info
     xmin, xmax, ymin, ymax = extent
     ncol, nrow = a.shape
@@ -238,9 +234,7 @@ def __init__(
             self.modelgrid = modelgrid.parent.modelgrid
         else:
-            raise TypeError(
-                "A StructuredGrid object must be supplied to Gridgen"
-            )
+            raise TypeError("A StructuredGrid object must be supplied to Gridgen")
 
         self.nlay = self.modelgrid.nlay
         self.nrow = self.modelgrid.nrow
@@ -269,12 +263,8 @@ def __init__(
         if vertical_pass_through:
             self.vertical_pass_through = "True"
 
-        self.smoothing_level_vertical = kwargs.pop(
-            "smoothing_level_vertical", 1
-        )
-        self.smoothing_level_horizontal = kwargs.pop(
-            "smoothing_level_horizontal", 1
-        )
+        self.smoothing_level_vertical = kwargs.pop("smoothing_level_vertical", 1)
+        self.smoothing_level_horizontal = kwargs.pop("smoothing_level_horizontal", 1)
         # Set up a blank _active_domain list with None for each layer
         self._addict = {}
         self._active_domain = []
@@ -289,9 +279,7 @@ def __init__(
         # Set up blank _elev and _elev_extent dictionaries
         self._asciigrid_dict = {}
 
-    def set_surface_interpolation(
-        self, isurf, type, elev=None, elev_extent=None
-    ):
+    def set_surface_interpolation(self, isurf, type, elev=None, elev_extent=None):
         """
         Parameters
         ----------
@@ -326,9 +314,7 @@ def set_surface_interpolation(
         if type == "ASCIIGRID":
             if isinstance(elev, np.ndarray):
                 if elev_extent is None:
-                    raise ValueError(
-                        "ASCIIGRID was specified but elev_extent was not."
-                    )
+                    raise ValueError("ASCIIGRID was specified but elev_extent was not.")
                 try:
                     xmin, xmax, ymin, ymax = elev_extent
                 except:
@@ -360,9 +346,7 @@ def _resolve(p):
             return path if path.is_file() else self.model_ws / p
 
         path = _resolve(p)
-        path = (
-            path if path.is_file() else _resolve(Path(p).with_suffix(".shp"))
-        )
+        path = path if path.is_file() else _resolve(Path(p).with_suffix(".shp"))
         return path if path.is_file() else None
 
     def add_active_domain(self, feature, layers):
@@ -408,9 +392,7 @@ def add_active_domain(self, feature, layers):
         ), f"Shapefile does not exist: {shapefile_path}"
 
         # store shapefile info
-        self._addict[shapefile_path.stem] = relpath_safe(
-            shapefile_path, self.model_ws
-        )
+        self._addict[shapefile_path.stem] = relpath_safe(shapefile_path, self.model_ws)
         for k in layers:
             self._active_domain[k] = shapefile_path.stem
@@ -593,9 +575,7 @@ def export(self, verbose=False):
             f.write("\n")
             f.write(self._grid_export_blocks())
         f.close()
-        assert os.path.isfile(
-            fname
-        ), f"Could not create export dfn file: {fname}"
+        assert os.path.isfile(fname), f"Could not create export dfn file: {fname}"
 
         # Export shapefiles
         cmds = [
@@ -815,9 +795,7 @@ def get_disu(
         self.nodes = nodes
 
         # nodelay
-        nodelay = self.read_qtg_nodesperlay_dat(
-            model_ws=self.model_ws, nlay=nlay
-        )
+        nodelay = self.read_qtg_nodesperlay_dat(model_ws=self.model_ws, nlay=nlay)
 
         # top
         top = [0] * nlay
@@ -964,9 +942,7 @@ def get_nodelay(self):
         """
         nlay = self.get_nlay()
-        nodelay = self.read_qtg_nodesperlay_dat(
-            model_ws=self.model_ws, nlay=nlay
-        )
+        nodelay = self.read_qtg_nodesperlay_dat(model_ws=self.model_ws, nlay=nlay)
         return nodelay
 
     def get_top(self):
@@ -1302,9 +1278,7 @@ def get_verts_iverts(self, ncells, verbose=False):
         """
         from .cvfdutil import to_cvfd
 
-        verts, iverts = to_cvfd(
-            self._vertdict, nodestop=ncells, verbose=verbose
-        )
+        verts, iverts = to_cvfd(self._vertdict, nodestop=ncells, verbose=verbose)
         return verts, iverts
 
     def get_cellxy(self, ncells):
@@ -1763,9 +1737,7 @@ def _mfgrid_block(self):
             if bot.min() == bot.max():
                 s += f" BOTTOM LAYER {k + 1} = CONSTANT {bot.min()}\n"
             else:
-                s += " BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n".format(
-                    k + 1
-                )
+                s += " BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n".format(k + 1)
                 fname = os.path.join(self.model_ws, f"bot{k + 1}.dat")
                 np.savetxt(fname, bot)
@@ -1894,9 +1866,7 @@ def _mkvertdict(self):
             self._vertdict[nodenumber] = shapes[i].points
 
     @staticmethod
-    def read_qtg_nod(
-        model_ws: Union[str, os.PathLike], nodes_only: bool = False
-    ):
+    def read_qtg_nod(model_ws: Union[str, os.PathLike], nodes_only: bool = False):
        """Read qtg.nod file
 
        Parameters
diff --git a/flopy/utils/gridintersect.py b/flopy/utils/gridintersect.py
index e31631f6db..58b614f28d 100644
--- a/flopy/utils/gridintersect.py
+++ b/flopy/utils/gridintersect.py
@@ -220,10 +220,7 @@ def intersect(
         shp = gu.shapely
 
         if gu.shapetype in ("Point", "MultiPoint"):
-            if (
-                self.method == "structured"
-                and self.mfgrid.grid_type == "structured"
-            ):
+            if self.method == "structured" and self.mfgrid.grid_type == "structured":
                 rec = self._intersect_point_structured(
                     shp, return_all_intersections=return_all_intersections
                 )
@@ -234,10 +231,7 @@ def intersect(
                     return_all_intersections=return_all_intersections,
                 )
         elif gu.shapetype in ("LineString", "MultiLineString"):
-            if (
-                self.method == "structured"
-                and self.mfgrid.grid_type == "structured"
-            ):
+            if self.method == "structured" and self.mfgrid.grid_type == "structured":
                 rec = self._intersect_linestring_structured(
                     shp,
                     keepzerolengths,
@@ -251,10 +245,7 @@ def intersect(
                     return_all_intersections=return_all_intersections,
                 )
         elif gu.shapetype in ("Polygon", "MultiPolygon"):
-            if (
-                self.method == "structured"
-                and self.mfgrid.grid_type == "structured"
-            ):
+            if self.method == "structured" and self.mfgrid.grid_type == "structured":
                 rec = self._intersect_polygon_structured(
                     shp,
                     contains_centroid=contains_centroid,
@@ -378,9 +369,7 @@ def _vtx_grid_to_geoms_cellids(self):
                 list(
                     zip(
                         *self.mfgrid.get_local_coords(
-                            *np.array(
-                                self.mfgrid.get_cell_vertices(node)
-                            ).T
+                            *np.array(self.mfgrid.get_cell_vertices(node)).T
                         )
                     )
                 )
@@ -566,8 +555,7 @@ def parse_linestrings_in_geom_collection(gc):
             #     arr=ixresult[mask_gc],
             # )
             ixresult[mask_gc] = [
-                parse_linestrings_in_geom_collection(gc)
-                for gc in ixresult[mask_gc]
+                parse_linestrings_in_geom_collection(gc) for gc in ixresult[mask_gc]
             ]
 
         if not return_all_intersections:
@@ -589,9 +577,7 @@ def parse_linestrings_in_geom_collection(gc):
 
             # masks to obtain overlapping intersection result
             mask_self = idxs == i  # select not self
-            mask_bnds_empty = shapely.is_empty(
-                isect
-            )  # select boundary ix result
+            mask_bnds_empty = shapely.is_empty(isect)  # select boundary ix result
             mask_overlap = np.isin(shapely.get_type_id(isect), all_ids)
 
             # calculate difference between self and overlapping result
@@ -673,9 +659,9 @@ def parse_polygons_in_geom_collection(gc):
         # check centroids
         if contains_centroid:
             centroids = shapely.centroid(self.geoms[qcellids])
-            mask_centroid = shapely.contains(
+            mask_centroid = shapely.contains(ixresult, centroids) | shapely.touches(
                 ixresult, centroids
-            ) | shapely.touches(ixresult, centroids)
+            )
             ixresult = ixresult[mask_centroid]
             qcellids = qcellids[mask_centroid]
@@ -861,9 +847,7 @@ def _intersect_point_structured(self, shp, return_all_intersections=False):
                     tempnodes.append(node)
                     tempshapes.append(ixs)
                 else:
-                    tempshapes[-1] = shapely_geo.MultiPoint(
-                        [tempshapes[-1], ixs]
-                    )
+                    tempshapes[-1] = shapely_geo.MultiPoint([tempshapes[-1], ixs])
             ixshapes = tempshapes
             nodelist = tempnodes
@@ -929,9 +913,7 @@ def _intersect_linestring_structured(
             shp, xoff=-self.mfgrid.xoffset, yoff=-self.mfgrid.yoffset
         )
         if self.mfgrid.angrot != 0.0 and not self.local:
-            shp = affinity_loc.rotate(
-                shp, -self.mfgrid.angrot, origin=(0.0, 0.0)
-            )
+            shp = affinity_loc.rotate(shp, -self.mfgrid.angrot, origin=(0.0, 0.0))
 
         # clip line to mfgrid bbox
         lineclip = shp.intersection(pl)
@@ -1042,9 +1024,7 @@ def _intersect_linestring_structured(
                 templengths.append(
                     sum([l for l, i in zip(lengths, nodelist) if i == inode])
                 )
-                tempverts.append(
-                    [v for v, i in zip(vertices, nodelist) if i == inode]
-                )
+                tempverts.append([v for v, i in zip(vertices, nodelist) if i == inode])
                 tempshapes.append(
                     [ix for ix, i in zip(ixshapes, nodelist) if i == inode]
                 )
@@ -1197,9 +1177,7 @@ def _get_nodes_intersecting_linestring(
 
         return nodelist, lengths, vertices, ixshapes
 
-    def _check_adjacent_cells_intersecting_line(
-        self, linestring, i_j, nodelist
-    ):
+    def _check_adjacent_cells_intersecting_line(self, linestring, i_j, nodelist):
         """helper method that follows a line through a structured grid.
 
         .. deprecated:: 3.9.0
@@ -1509,9 +1487,7 @@ def _intersect_polygon_structured(
             shp, xoff=-self.mfgrid.xoffset, yoff=-self.mfgrid.yoffset
         )
         if self.mfgrid.angrot != 0.0 and not self.local:
-            shp = affinity_loc.rotate(
-                shp, -self.mfgrid.angrot, origin=(0.0, 0.0)
-            )
+            shp = affinity_loc.rotate(shp, -self.mfgrid.angrot, origin=(0.0, 0.0))
 
         # use the bounds of the polygon to restrict the cell search
         minx, miny, maxx, maxy = shp.bounds
@@ -1559,9 +1535,7 @@ def _intersect_polygon_structured(
                     # option: min_area_fraction, only store if intersected area
                     # is larger than fraction * cell_area
                     if min_area_fraction:
-                        if intersect.area < (
-                            min_area_fraction * cell_polygon.area
-                        ):
+                        if intersect.area < (min_area_fraction * cell_polygon.area):
                             continue
 
                     nodelist.append((i, j))
@@ -1577,13 +1551,9 @@ def _intersect_polygon_structured(
                         v_realworld = []
                         if intersect.geom_type.startswith("Multi"):
                             for ipoly in intersect.geoms:
-                                v_realworld += (
-                                    self._transform_geo_interface_polygon(ipoly)
-                                )
+                                v_realworld += self._transform_geo_interface_polygon(ipoly)
                         else:
-                            v_realworld += self._transform_geo_interface_polygon(
-                                intersect
-                            )
+                            v_realworld += self._transform_geo_interface_polygon(intersect)
                         intersect_realworld = affinity_loc.rotate(
                             intersect, self.mfgrid.angrot, origin=(0.0, 0.0)
                         )
@@ -1727,9 +1697,7 @@ def add_poly_patch(poly):
 
     # allow for result to be geodataframe
     geoms = (
-        result.ixshapes
-        if isinstance(result, np.rec.recarray)
-        else result.geometry
+        result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
     )
     for i, ishp in enumerate(geoms):
         if hasattr(ishp, "geoms"):
@@ -1789,9 +1757,7 @@ def plot_linestring(result, ax=None, cmap=None, **kwargs):
 
     # allow for result to be geodataframe
     geoms = (
-        result.ixshapes
-        if isinstance(result, np.rec.recarray)
-        else result.geometry
+        result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
     )
     for i, ishp in enumerate(geoms):
         if not specified_color:
@@ -1838,9 +1804,7 @@ def plot_point(result, ax=None, **kwargs):
     x, y = [], []
     # allow for result to be geodataframe
     geoms = (
-        result.ixshapes
-        if isinstance(result, np.rec.recarray)
-        else result.geometry
+        result.ixshapes if isinstance(result, np.rec.recarray) else result.geometry
     )
     geo_coll = shapely_geo.GeometryCollection(list(geoms))
     collection = parse_shapely_ix_result([], geo_coll, ["Point"])
@@ -1851,9 +1815,7 @@ def plot_point(result, ax=None, **kwargs):
 
         return ax
 
-    def plot_intersection_result(
-        self, result, plot_grid=True, ax=None, **kwargs
-    ):
+    def plot_intersection_result(self, result, plot_grid=True, ax=None, **kwargs):
         """Plot intersection result.
 
         Parameters
@@ -2049,10 +2011,7 @@ def _polygon_patch(polygon, **kwargs):
     patch = PathPatch(
         Path.make_compound_path(
             Path(np.asarray(polygon.exterior.coords)[:, :2]),
-            *[
-                Path(np.asarray(ring.coords)[:, :2])
-                for ring in polygon.interiors
-            ],
+            *[Path(np.asarray(ring.coords)[:, :2]) for ring in polygon.interiors],
         ),
         **kwargs,
     )
diff --git a/flopy/utils/gridutil.py b/flopy/utils/gridutil.py
index f3387c72da..fa8fccb124 100644
--- a/flopy/utils/gridutil.py
+++ b/flopy/utils/gridutil.py
@@ -293,9 +293,7 @@ def get_disv_kwargs(
     if np.isscalar(botm):
         botm = botm * np.ones((nlay, nrow, ncol), dtype=float)
     elif isinstance(botm, list):
-        assert (
-            len(botm) == nlay
-        ), "if botm provided as a list it must have length nlay"
+        assert len(botm) == nlay, "if botm provided as a list it must have length nlay"
         b = np.empty((nlay, nrow, ncol), dtype=float)
         for k in range(nlay):
             b[k] = botm[k]
diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py
index 467f46a702..07fc771152 100644
--- a/flopy/utils/lgrutil.py
+++ b/flopy/utils/lgrutil.py
@@ -155,9 +155,7 @@ def __init__(
         self.delrp = Util2d(m, (ncolp,), np.float32, delrp, "delrp").array
         self.delcp = Util2d(m, (nrowp,), np.float32, delcp, "delcp").array
         self.topp = Util2d(m, (nrowp, ncolp), np.float32, topp, "topp").array
-        self.botmp = Util3d(
-            m, (nlayp, nrowp, ncolp), np.float32, botmp, "botmp"
-        ).array
+        self.botmp = Util3d(m, (nlayp, nrowp, ncolp), np.float32, botmp, "botmp").array
 
         # idomain
         assert idomainp.shape == (nlayp, nrowp, ncolp)
@@ -293,9 +291,7 @@ def get_replicated_parent_array(self, parent_array):
         """
         assert parent_array.shape == (self.nrowp, self.ncolp)
-        child_array = np.empty(
-            (self.nrow, self.ncol), dtype=parent_array.dtype
-        )
+        child_array = np.empty((self.nrow, self.ncol), dtype=parent_array.dtype)
         for ip in range(self.nprbeg, self.nprend + 1):
             for jp in range(self.npcbeg, self.npcend + 1):
                 icrowstart = (ip - self.nprbeg) * self.ncpp
@@ -706,9 +702,7 @@ def find_hanging_vertices(self):
                 if cidomain[kc, ic, jc] == 0:
                     continue
 
-                if (
-                    idir == -1
-                ):  # left child face connected to right parent face
+                if idir == -1:  # left child face connected to right parent face
                     # child vertices 0 and 3 added as hanging nodes
                     if (ip, jp) in self.right_face_hanging:
                         hlist = self.right_face_hanging.pop((ip, jp))
@@ -919,14 +913,10 @@ def get_xcyc(self):
         cidx = self.cgrid.idomain[0] > 0
         px = self.pgrid.xcellcenters[pidx].flatten()
         cx = self.cgrid.xcellcenters[cidx].flatten()
-        xcyc[:, 0] = np.vstack(
-            (np.atleast_2d(px).T, np.atleast_2d(cx).T)
-        ).flatten()
+        xcyc[:, 0] = np.vstack((np.atleast_2d(px).T, np.atleast_2d(cx).T)).flatten()
         py = self.pgrid.ycellcenters[pidx].flatten()
         cy = self.cgrid.ycellcenters[cidx].flatten()
-        xcyc[:, 1] = np.vstack(
-            (np.atleast_2d(py).T, np.atleast_2d(cy).T)
-        ).flatten()
+        xcyc[:, 1] = np.vstack((np.atleast_2d(py).T, np.atleast_2d(cy).T)).flatten()
         return xcyc
 
     def get_top(self):
diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py
index ced237728a..add18381e1 100644
--- a/flopy/utils/mflistfile.py
+++ b/flopy/utils/mflistfile.py
@@ -197,9 +197,7 @@ def get_kstpkper(self):
         if not self._isvalid:
             return None
         kstpkper = []
-        for kstp, kper in zip(
-            self.inc["time_step"], self.inc["stress_period"]
-        ):
+        for kstp, kper in zip(self.inc["time_step"], self.inc["stress_period"]):
             kstpkper.append((kstp, kper))
         return kstpkper
 
@@ -301,11 +299,7 @@ def get_model_runtime(self, units="seconds"):
         # reopen the file
         self.f = open(self.file_name, "r", encoding="ascii", errors="replace")
         units = units.lower()
-        if (
-            not units == "seconds"
-            and not units == "minutes"
-            and not units == "hours"
-        ):
+        if not units == "seconds" and not units == "minutes" and not units == "hours":
             raise AssertionError(
                 '"units" input variable must be "minutes", "hours", '
                 f'or "seconds": {units} was specified'
@@ -429,16 +423,12 @@ def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
             try:
                 ipos = self.get_kstpkper().index(kstpkper)
             except:
-                print(
-                    f" could not retrieve kstpkper {kstpkper} from the lst file"
-                )
+                print(f" could not retrieve kstpkper {kstpkper} from the lst file")
         elif totim is not None:
             try:
                 ipos = self.get_times().index(totim)
             except:
-                print(
-                    f" could not retrieve totime {totim} from the lst file"
-                )
+                print(f" could not retrieve totime {totim} from the lst file")
         elif idx is not None:
             ipos = idx
         else:
@@ -456,9 +446,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
         else:
             t = self.cum[ipos]
 
-        dtype = np.dtype(
-            [("index", np.int32), ("value", np.float32), ("name", "|S25")]
-        )
+        dtype = np.dtype([("index", np.int32), ("value", np.float32), ("name", "|S25")])
         v = np.recarray(shape=(len(self.inc.dtype.names[3:])), dtype=dtype)
         for i, name in enumerate(self.inc.dtype.names[3:]):
             mult = 1.0
@@ -695,8 +683,7 @@ def _set_entries(self):
             )
         except:
             raise Exception(
-                "unable to read budget information from first "
-                "entry in list file"
+                "unable to read budget information from first entry in list file"
             )
         self.entries = incdict.keys()
         null_entries = {}
@@ -885,14 +872,10 @@ def _get_totim(self, ts, sp, seekpoint):
                 return np.nan, np.nan, np.nan
             elif (
                 ihead == 2
-                and "SECONDS MINUTES HOURS DAYS YEARS"
-                not in line
+                and "SECONDS MINUTES HOURS DAYS YEARS" not in line
             ):
                 break
-            elif (
-                "-----------------------------------------------------------"
-                in line
-            ):
+            elif "-----------------------------------------------------------" in line:
                 line = self.f.readline()
                 break
diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py
index 695b9b83c4..550db47891 100644
--- a/flopy/utils/modpathfile.py
+++ b/flopy/utils/modpathfile.py
@@ -17,9 +17,7 @@ class ModpathFile(ParticleTrackFile):
     """Provides MODPATH output file support."""
 
-    def __init__(
-        self, filename: Union[str, os.PathLike], verbose: bool = False
-    ):
+    def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
         super().__init__(filename, verbose)
         self.output_type = self.__class__.__name__.lower().replace("file", "")
         (
@@ -67,10 +65,7 @@ def parse(
                 if skiprows < 1:
                     if f"MODPATH_{file_type.upper()}_FILE 6" in line.upper():
                         version = 6
-                    elif (
-                        f"MODPATH_{file_type.upper()}_FILE 7"
-                        in line.upper()
-                    ):
+                    elif f"MODPATH_{file_type.upper()}_FILE 7" in line.upper():
                         version = 7
                     elif "MODPATH 5.0" in line.upper():
                         version = 5
@@ -95,16 +90,15 @@ def parse(
 
         return modpath, compact, skiprows, version, direction
 
-    def intersect(
-        self, cells, to_recarray
-    ) -> Union[list[np.recarray], np.recarray]:
+    def intersect(self, cells, to_recarray) -> Union[list[np.recarray], np.recarray]:
         if self.version < 7:
             try:
                 raslice = self._data[["k", "i", "j"]]
             except (KeyError, ValueError):
                 raise KeyError(
-                    "could not extract 'k', 'i', and 'j' keys "
-                    "from {} data".format(self.output_type.lower())
+                    "could not extract 'k', 'i', and 'j' keys from {} data".format(
+                        self.output_type.lower()
+                    )
                 )
         else:
             try:
@@ -232,9 +226,7 @@ class PathlineFile(ModpathFile):
         "sequencenumber",
     ]
 
-    def __init__(
-        self, filename: Union[str, os.PathLike], verbose: bool = False
-    ):
+    def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
         super().__init__(filename, verbose=verbose)
         self.dtype, self._data = self._load()
         self.nid = np.unique(self._data["particleid"])
@@ -278,9 +270,7 @@ def _load(self) -> tuple[np.dtype, np.ndarray]:
                     sequencenumber, group, particleid, pathlinecount = t[0:4]
                     nrows += pathlinecount
                     # read in the particle data
-                    d = np.loadtxt(
-                        itertools.islice(f, 0, pathlinecount), dtype=dtyper
-                    )
+                    d = np.loadtxt(itertools.islice(f, 0, pathlinecount), dtype=dtyper)
                     key = (
                         idx,
                         sequencenumber,
@@ -297,9 +287,7 @@ def _load(self) -> tuple[np.dtype, np.ndarray]:
             # fill data
             ipos0 = 0
             for key, value in particle_pathlines.items():
-                idx, sequencenumber, group, particleid, pathlinecount = key[
-                    0:5
-                ]
+                idx, sequencenumber, group, particleid, pathlinecount = key[0:5]
                 ipos1 = ipos0 + pathlinecount
                 # fill constant items for particle
                 # particleid is not necessarily unique for all pathlines - use
@@ -556,9 +544,7 @@ class EndpointFile(ModpathFile):
         "zone",
     ]
 
-    def __init__(
-        self, filename: Union[str, os.PathLike], verbose: bool = False
-    ):
+    def __init__(self, filename: Union[str, os.PathLike], verbose: bool = False):
         super().__init__(filename, verbose)
         self.dtype, self._data = self._load()
         self.nid = np.unique(self._data["particleid"])
@@ -665,9 +651,7 @@ def get_destination_endpoint_data(self, dest_cells, source=False):
             raslice = repack_fields(data[keys])
         except (KeyError, ValueError):
             raise KeyError(
-                "could not extract "
-                + "', '".join(keys)
-                + " from endpoint data."
+                "could not extract " + "', '".join(keys) + " from endpoint data."
             )
         else:
             if source:
@@ -754,8 +738,7 @@ def write_shapefile(
             xcol, ycol, zcol = "x0", "y0", "z0"
         else:
             raise Exception(
-                'flopy.map.plot_endpoint direction must be "ending" '
-                'or "starting".'
+                'flopy.map.plot_endpoint direction must be "ending" or "starting".'
             )
         if mg is None:
             raise ValueError("A modelgrid object was not provided.")
diff --git a/flopy/utils/mtlistfile.py b/flopy/utils/mtlistfile.py
index 7bae5ea049..5d562606e2 100644
--- a/flopy/utils/mtlistfile.py
+++ b/flopy/utils/mtlistfile.py
@@ -51,9 +51,7 @@ def __init__(self, file_name):
 
         return
 
-    def parse(
-        self, forgive=True, diff=True, start_datetime=None, time_unit="d"
-    ):
+    def parse(self, forgive=True, diff=True, start_datetime=None, time_unit="d"):
         """
         Main entry point for parsing the list file.
@@ -111,10 +109,8 @@ def parse(
                     self._parse_sw(f, line)
                 elif self.tkstp_key in line:
                     try:
-                        self.tkstp_overflow = (
-                            self._extract_number_between_strings(
-                                line, self.tkstp_key, "in"
-                            )
+                        self.tkstp_overflow = self._extract_number_between_strings(
+                            line, self.tkstp_key, "in"
                         )
                     except Exception as e:
                         warnings.warn(
@@ -175,15 +171,9 @@ def parse(
         return df_gw, df_sw
 
     def _diff(self, df):
-        out_cols = [
-            c for c in df.columns if "_out" in c and not c.startswith("net_")
-        ]
-        in_cols = [
-            c for c in df.columns if "_in" in c and not c.startswith("net_")
-        ]
-        add_cols = [
-            c for c in df.columns if c not in out_cols + in_cols + ["totim"]
-        ]
+        out_cols = [c for c in df.columns if "_out" in c and not c.startswith("net_")]
+        in_cols = [c for c in df.columns if "_in" in c and not c.startswith("net_")]
+        add_cols = [c for c in df.columns if c not in out_cols + in_cols + ["totim"]]
         out_base = [c.replace("_out_", "_") for c in out_cols]
         in_base = [c.replace("_in_", "_") for c in in_cols]
         map_names = {
@@ -240,15 +230,11 @@ def _parse_gw(self, f, line):
         for _ in range(7):
             line = self._readline(f)
             if line is None:
-                raise Exception(
-                    "EOF while reading from component header to totim"
-                )
+                raise Exception("EOF while reading from component header to totim")
         try:
             totim = float(line.split()[-2])
         except Exception as e:
-            raise Exception(
-                f"error parsing totim on line {self.lcount}: {e!s}"
-            )
+            raise Exception(f"error parsing totim on line {self.lcount}: {e!s}")
 
         for _ in range(3):
             line = self._readline(f)
@@ -259,9 +245,7 @@ def _parse_gw(self, f, line):
         for _ in range(4):
             line = self._readline(f)
             if line is None:
-                raise Exception(
-                    "EOF while reading from time step to particles"
-                )
+                raise Exception("EOF while reading from time step to particles")
 
         try:
             kper = int(line[-6:-1])
@@ -301,9 +285,7 @@ def _parse_gw(self, f, line):
             try:
                 item, ival, oval = self._parse_gw_line(line)
             except Exception as e:
-                raise Exception(
-                    f"error parsing GW items on line {self.lcount}: {e!s}"
-                )
+                raise Exception(f"error parsing GW items on line {self.lcount}: {e!s}")
             self._add_to_gw_data(item, ival, oval, comp)
             if break_next:
                 break
@@ -328,9 +310,7 @@ def _parse_gw(self, f, line):
             try:
                 item, ival, oval = self._parse_gw_line(line)
             except Exception as e:
-                raise Exception(
-                    f"error parsing GW items on line {self.lcount}: {e!s}"
-                )
+                raise Exception(f"error parsing GW items on line {self.lcount}: {e!s}")
             self._add_to_gw_data(item, ival, oval, comp)
             if "discrepancy" in item:
                 # can't rely on blank lines following block
@@ -477,12 +457,8 @@ def _add_to_sw_data(self, inout, item, cval, fval, comp):
             self.sw_data[iitem].append(val)
 
     @staticmethod
-    def _extract_number_between_strings(
-        input_string, start_string, end_string
-    ):
-        pattern = (
-            rf"{re.escape(start_string)}\s*(\d+)\s*{re.escape(end_string)}"
-        )
+    def _extract_number_between_strings(input_string, start_string, end_string):
+        pattern = rf"{re.escape(start_string)}\s*(\d+)\s*{re.escape(end_string)}"
 
         match = re.search(pattern, input_string)
         if match:
diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py
index 3e5308b3aa..e416bbb386 100644
--- a/flopy/utils/observationfile.py
+++ b/flopy/utils/observationfile.py
@@ -496,9 +496,7 @@ class CsvFile:
 
     """
 
-    def __init__(
-        self, csvfile, delimiter=",", deletechars="", replace_space=""
-    ):
+    def __init__(self, csvfile, delimiter=",", deletechars="", replace_space=""):
         with open(csvfile) as self.file:
             self.delimiter = delimiter
             self.deletechars = deletechars
diff --git a/flopy/utils/optionblock.py b/flopy/utils/optionblock.py
index f999fef28a..0ebee3949b 100644
--- a/flopy/utils/optionblock.py
+++ b/flopy/utils/optionblock.py
@@ -132,9 +132,7 @@ def __repr__(self):
                 if v == "None" and d[OptionBlock.optional]:
                     pass
                 else:
-                    val.append(
-                        str(object.__getattribute__(self, k))
-                    )
+                    val.append(str(object.__getattribute__(self, k)))
 
         if "None" in val:
             pass
@@ -406,7 +404,9 @@ def load_options(cls, options, package):
                             valid = True
 
                         if not valid:
-                            err_msg = f"Invalid type set to variable {k} in option block"
+                            err_msg = (
+                                f"Invalid type set to variable {k} in option block"
+                            )
                             raise TypeError(err_msg)
 
                 option_line += t[ix] + " "
diff --git a/flopy/utils/parse_version.py b/flopy/utils/parse_version.py
index 8b4cdae897..dfa7c37e5c 100644
--- a/flopy/utils/parse_version.py
+++ b/flopy/utils/parse_version.py
@@ -246,9 +246,7 @@ def is_devrelease(self) -> bool:
         return False
 
 
-_legacy_version_component_re = re.compile(
-    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE
-)
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
 
 _legacy_version_replacement_map = {
     "pre": "c",
@@ -337,9 +335,7 @@ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
 
 
 class Version(_BaseVersion):
-    _regex = re.compile(
-        r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE
-    )
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
 
     def __init__(self, version: str) -> None:
         # Validate the version and parse it into pieces
@@ -351,16 +347,12 @@ def __init__(self, version: str) -> None:
         self._version = _Version(
             epoch=int(match.group("epoch")) if match.group("epoch") else 0,
             release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(
-                match.group("pre_l"), match.group("pre_n")
-            ),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
             post=_parse_letter_version(
                 match.group("post_l"),
                 match.group("post_n1") or match.group("post_n2"),
             ),
-            dev=_parse_letter_version(
-                match.group("dev_l"), match.group("dev_n")
-            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
             local=_parse_local_version(match.group("local")),
         )
 
@@ -541,9 +533,7 @@ def _cmpkey(
     # re-reverse it back into the correct order and make it a tuple and use
     # that for our sorting key.
     _release = tuple(
-        reversed(
-            list(itertools.dropwhile(lambda x: x == 0, reversed(release)))
-        )
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
     )
 
     # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
@@ -585,8 +575,7 @@ def _cmpkey(
    #   - Shorter versions sort before longer versions when the prefixes
    #     match exactly
     _local = tuple(
-        (i, "") if isinstance(i, int) else (NegativeInfinity, i)
-        for i in local
+        (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
     )
 
     return epoch, _release, _pre, _post, _dev, _local
diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py
index 378ff830ea..c5df347c02 100644
--- a/flopy/utils/particletrackfile.py
+++ b/flopy/utils/particletrackfile.py
@@ -79,9 +79,7 @@ def get_maxtime(self) -> float:
         """
         return self._data["time"].max()
 
-    def get_data(
-        self, partid=0, totim=None, ge=True, minimal=False
-    ) -> np.recarray:
+    def get_data(self, partid=0, totim=None, ge=True, minimal=False) -> np.recarray:
         """
         Get a single particle track, optionally filtering by time.
@@ -153,9 +151,7 @@ def get_alldata(self, totim=None, ge=True, minimal=False):
             data = data[idx]
         return [data[data["particleid"] == i] for i in nids]
 
-    def get_destination_data(
-        self, dest_cells, to_recarray=True
-    ) -> np.recarray:
+    def get_destination_data(self, dest_cells, to_recarray=True) -> np.recarray:
         """
         Get data for set of destination cells.
@@ -318,9 +314,7 @@ def write_shapefile(
             x, y = geometry.transform(ra.x, ra.y, 0, 0, 0)
             z = ra.z
             geoms += [
-                LineString(
-                    [(x[i - 1], y[i - 1], z[i - 1]), (x[i], y[i], z[i])]
-                )
+                LineString([(x[i - 1], y[i - 1], z[i - 1]), (x[i], y[i], z[i])])
                 for i in np.arange(1, (len(ra)))
             ]
             sdata += ra[1:].tolist()
diff --git a/flopy/utils/postprocessing.py b/flopy/utils/postprocessing.py
index 0f204cb7e8..922455bbb8 100644
--- a/flopy/utils/postprocessing.py
+++ b/flopy/utils/postprocessing.py
@@ -295,9 +295,7 @@ def get_extended_budget(
         matched_name = [s for s in rec_names if budget_term in s]
         if not matched_name:
             raise RuntimeError(budget_term + err_msg)
-        frf = cbf.get_data(
-            idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
-        )
+        frf = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
         Qx_ext[:, :, 1:] = frf[0]
         # SWI2 package
         budget_term_swi = "SWIADDTOFRF"
@@ -315,9 +313,7 @@ def get_extended_budget(
         matched_name = [s for s in rec_names if budget_term in s]
         if not matched_name:
             raise RuntimeError(budget_term + err_msg)
-        fff = cbf.get_data(
-            idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
-        )
+        fff = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
         Qy_ext[:, 1:, :] = -fff[0]
         # SWI2 package
         budget_term_swi = "SWIADDTOFFF"
@@ -335,9 +331,7 @@ def get_extended_budget(
         matched_name = [s for s in rec_names if budget_term in s]
         if not matched_name:
             raise RuntimeError(budget_term + err_msg)
-        flf = cbf.get_data(
-            idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term
-        )
+        flf = cbf.get_data(idx=idx, kstpkper=kstpkper, totim=totim, text=budget_term)
         Qz_ext[1:, :, :] = -flf[0]
         # SWI2 package
         budget_term_swi = "SWIADDTOFLF"
@@ -352,9 +346,7 @@ def get_extended_budget(
     if boundary_ifaces is not None:
         # need calculated heads for some stresses and to check hnoflo and hdry
         if hdsfile is None:
-            raise ValueError(
-                "hdsfile must be provided when using boundary_ifaces"
-            )
+            raise ValueError("hdsfile must be provided when using boundary_ifaces")
         if isinstance(hdsfile, (bf.HeadFile, fm.FormattedHeadFile)):
             hds = hdsfile
         else:
@@ -366,9 +358,7 @@ def get_extended_budget(
 
         # get hnoflo and hdry values
         if model is None:
-            raise ValueError(
-                "model must be provided when using boundary_ifaces"
-            )
+            raise ValueError("model must be provided when using boundary_ifaces")
         noflo_or_dry = np.logical_or(head == model.hnoflo, head == model.hdry)
 
         for budget_term, iface_info in boundary_ifaces.items():
@@ -410,9 +400,7 @@ def get_extended_budget(
                         np.logical_not(noflo_or_dry[lay, :, :]),
                         np.logical_not(already_found),
                     )
-                    already_found = np.logical_or(
-                        already_found, water_table[lay, :, :]
-                    )
+                    already_found = np.logical_or(already_found, water_table[lay, :, :])
                 Q_stress[np.logical_not(water_table)] = 0.0
 
             # case where the same iface is assigned to all cells
@@ -532,9 +520,7 @@ def get_extended_budget(
                     elif iface == 6:
                         Qz_ext[lay, row, col] -= Q_stress_cell
             else:
-                raise TypeError(
-                    "boundary_ifaces value must be either int or list."
-                )
+                raise TypeError("boundary_ifaces value must be either int or list.")
 
     return Qx_ext, Qy_ext, Qz_ext
 
@@ -652,9 +638,7 @@ def get_specific_discharge(
         if modelgrid._idomain is None:
             modelgrid._idomain = model.dis.ibound
         if head is not None:
-            noflo_or_dry = np.logical_or(
-                head == model.hnoflo, head == model.hdry
-            )
+            noflo_or_dry = np.logical_or(head == model.hnoflo, head == model.hdry)
             modelgrid._idomain[noflo_or_dry] = 0
 
     # get cross section areas along x
@@ -675,26 +659,16 @@ def get_specific_discharge(
         cross_area_x = (
             delc[:]
             * 0.5
-            * (
-                saturated_thickness[:, :, :-1]
-                + saturated_thickness[:, :, 1:]
-            )
+            * (saturated_thickness[:, :, :-1] + saturated_thickness[:, :, 1:])
         )
         cross_area_y = (
             delr
             * 0.5
-            * (
-                saturated_thickness[:, 1:, :]
-                + saturated_thickness[:, :-1, :]
-            )
-        )
-        qx[:, :, 1:] = (
-            0.5 * (tqx[:, :, 2:] + tqx[:, :, 1:-1]) / cross_area_x
+            * (saturated_thickness[:, 1:, :] + saturated_thickness[:, :-1, :])
         )
+        qx[:, :, 1:] = 0.5 * (tqx[:, :, 2:] + tqx[:, :, 1:-1]) / cross_area_x
         qx[:, :, 0] = 0.5 * tqx[:, :, 1] / cross_area_x[:, :, 0]
-        qy[:, 1:, :] = (
-            0.5 * (tqy[:, 2:, :] + tqy[:, 1:-1, :]) / cross_area_y
-        )
+        qy[:, 1:, :] = 0.5 * (tqy[:, 2:, :] + tqy[:, 1:-1, :]) / cross_area_y
         qy[:, 0, :] = 0.5 * tqy[:, 1, :] / cross_area_y[:, 0, :]
 
         qz = 0.5 * (tqz[1:, :, :] + tqz[:-1, :, :]) / cross_area_z
diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py
index 39697f97cf..cb53d4ed07 100644
--- a/flopy/utils/rasters.py
+++ b/flopy/utils/rasters.py
@@ -114,9 +114,7 @@ def __init__(
         self._meta = meta
         self._dataset = None
-        self.__arr_dict = {
-            self._bands[b]: arr for b, arr in enumerate(self._array)
-        }
+        self.__arr_dict = {self._bands[b]: arr for b, arr in enumerate(self._array)}
 
         self.__xcenters = None
         self.__ycenters = None
@@ -946,9 +944,7 @@ def raster_from_array(
             crs = modelgrid.crs
 
     if modelgrid.grid_type != "structured":
-        raise TypeError(
-            f"{type(modelgrid)} discretizations are not supported"
-        )
+        raise TypeError(f"{type(modelgrid)} discretizations are not supported")
 
     if not np.all(modelgrid.delc == modelgrid.delc[0]):
         raise AssertionError("DELC must have a uniform spacing")
@@ -959,9 +955,7 @@ def raster_from_array(
     yul = modelgrid.yvertices[0, 0]
     xul = modelgrid.xvertices[0, 0]
     angrot = modelgrid.angrot
-    transform = Affine(
-        modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul
-    )
+    transform = Affine(modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul)
 
     if angrot != 0:
         transform *= Affine.rotation(angrot)
diff --git a/flopy/utils/sfroutputfile.py b/flopy/utils/sfroutputfile.py
index ba23a9b5b4..faf5f31338 100644
--- a/flopy/utils/sfroutputfile.py
+++ b/flopy/utils/sfroutputfile.py
@@ -96,9 +96,7 @@ def __init__(self, filename, geometries=None, verbose=False):
             "Cond",
         ]
         if has_gradient and has_delUzstor:
-            raise ValueError(
-                "column 16 should be either 'gradient' or 'Qwt', not both"
-            )
+            raise ValueError("column 16 should be either 'gradient' or 'Qwt', not both")
         elif has_gradient:
             self.names.append("gradient")
         elif has_delUzstor:
@@ -147,9 +145,7 @@ def get_nstrm(df):
             Number of SFR cells
 
         """
-        wherereach1 = np.asarray(
-            (df.segment == 1) & (df.reach == 1)
-        ).nonzero()[0]
+        wherereach1 = np.asarray((df.segment == 1) & (df.reach == 1)).nonzero()[0]
         if len(wherereach1) == 1:
             return len(df)
         elif len(wherereach1) > 1:
diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py
index eff2488688..eed918b87f 100644
--- a/flopy/utils/swroutputfile.py
+++ b/flopy/utils/swroutputfile.py
@@ -41,9 +41,7 @@ class SwrFile(FlopyBinaryData):
""" - def __init__( - self, filename, swrtype="stage", precision="double", verbose=False - ): + def __init__(self, filename, swrtype="stage", precision="double", verbose=False): """ Class constructor. @@ -327,9 +325,7 @@ def get_ts(self, irec=0, iconn=0, klay=0, istr=0): return gage_record def _read_connectivity(self): - self.conn_dtype = np.dtype( - [("reach", "i4"), ("from", "i4"), ("to", "i4")] - ) + self.conn_dtype = np.dtype([("reach", "i4"), ("from", "i4"), ("to", "i4")]) conn = np.zeros((self.nrecord, 3), int) icount = 0 for nrg in range(self.flowitems): @@ -607,9 +603,7 @@ def _build_index(self): totim, dt, kper, kstp, kswr, success = self._read_header() if success: if self.type == "exchange": - bytes = self.nitems * ( - self.integerbyte + 8 * self.realbyte - ) + bytes = self.nitems * (self.integerbyte + 8 * self.realbyte) elif self.type == "structure": bytes = self.nitems * (5 * self.realbyte) else: @@ -626,9 +620,7 @@ def _build_index(self): else: if self.verbose: print() - self._recordarray = np.array( - self._recordarray, dtype=self.header_dtype - ) + self._recordarray = np.array(self._recordarray, dtype=self.header_dtype) self._times = np.array(self._times) self._kswrkstpkper = np.array(self._kswrkstpkper) return @@ -748,9 +740,7 @@ class SwrFlow(SwrFile): """ def __init__(self, filename, precision="double", verbose=False): - super().__init__( - filename, swrtype="flow", precision=precision, verbose=verbose - ) + super().__init__(filename, swrtype="flow", precision=precision, verbose=verbose) return diff --git a/flopy/utils/triangle.py b/flopy/utils/triangle.py index e378fb3184..822999b557 100644 --- a/flopy/utils/triangle.py +++ b/flopy/utils/triangle.py @@ -248,15 +248,11 @@ def plot( vertices = self.get_vertices() ncpl = len(cell2d) - modelgrid = VertexGrid( - vertices=vertices, cell2d=cell2d, ncpl=ncpl, nlay=1 - ) + modelgrid = VertexGrid(vertices=vertices, cell2d=cell2d, ncpl=ncpl, nlay=1) pmv = PlotMapView(modelgrid=modelgrid, ax=ax, layer=layer) if a is None: - pc = pmv.plot_grid( - facecolor=facecolor, edgecolor=edgecolor, **kwargs - ) + pc = pmv.plot_grid(facecolor=facecolor, edgecolor=edgecolor, **kwargs) else: pc = pmv.plot_array( a, diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py index 8d341f51a2..0000345b72 100644 --- a/flopy/utils/util_array.py +++ b/flopy/utils/util_array.py @@ -415,9 +415,7 @@ def read1d(f, a): """ if len(a.shape) != 1: - raise ValueError( - f"read1d: expected 1 dimension, found shape {a.shape}" - ) + raise ValueError(f"read1d: expected 1 dimension, found shape {a.shape}") values = [] while len(values) < a.shape[0]: line = f.readline() @@ -548,9 +546,7 @@ def __init__( return if len(shape) != 3: - raise ValueError( - f"Util3d: expected 3 dimensions, found shape {shape}" - ) + raise ValueError(f"Util3d: expected 3 dimensions, found shape {shape}") self._model = model self.shape = shape self._dtype = dtype @@ -591,22 +587,16 @@ def __init__( ) else: for k in range(shape[0]): - self.ext_filename_base.append( - self.name_base[k].replace(" ", "_") - ) + self.ext_filename_base.append(self.name_base[k].replace(" ", "_")) self.util_2ds = self.build_2d_instances() def __setitem__(self, k, value): if isinstance(k, int): - assert k in range( - 0, self.shape[0] - ), "Util3d error: k not in range nlay" + assert k in range(0, self.shape[0]), "Util3d error: k not in range nlay" self.util_2ds[k] = new_u2d(self.util_2ds[k], value) else: - raise NotImplementedError( - f"Util3d doesn't support setitem indices: {k}" - ) + raise 
NotImplementedError(f"Util3d doesn't support setitem indices: {k}") def __setattr__(self, key, value): if hasattr(self, "util_2ds") and key == "cnstnt": @@ -735,9 +725,7 @@ def plot( return axes def __getitem__(self, k): - if isinstance(k, int) or np.issubdtype( - getattr(k, "dtype", None), np.integer - ): + if isinstance(k, int) or np.issubdtype(getattr(k, "dtype", None), np.integer): return self.util_2ds[k] elif len(k) == 3: return self.array[k[0], k[1], k[2]] @@ -795,9 +783,7 @@ def build_2d_instances(self): and isinstance(self.shape[2], (np.ndarray, list)) and len(self.__value) == np.sum(self.shape[2]) ): - self.__value = np.split( - self.__value, np.cumsum(self.shape[2])[:-1] - ) + self.__value = np.split(self.__value, np.cumsum(self.shape[2])[:-1]) # if this is a list or 1-D array with constant values per layer if isinstance(self.__value, list) or ( @@ -813,9 +799,7 @@ def build_2d_instances(self): if isinstance(item, Util2d): # we need to reset the external name because most of the # load() methods don't use layer-specific names - item._ext_filename = ( - f"{self.ext_filename_base[i]}{i + 1}.ref" - ) + item._ext_filename = f"{self.ext_filename_base[i]}{i + 1}.ref" # reset the model instance in cases these Util2d's # came from another model instance item.model = self._model @@ -824,9 +808,7 @@ def build_2d_instances(self): name = self.name_base[i] + str(i + 1) ext_filename = None if self._model.external_path is not None: - ext_filename = ( - f"{self.ext_filename_base[i]}{i + 1}.ref" - ) + ext_filename = f"{self.ext_filename_base[i]}{i + 1}.ref" shape = self.shape[1:] if shape[0] is None: # allow for unstructured so that ncol changes by layer @@ -893,9 +875,7 @@ def load( array_format=None, ): if len(shape) != 3: - raise ValueError( - f"Util3d: expected 3 dimensions, found shape {shape}" - ) + raise ValueError(f"Util3d: expected 3 dimensions, found shape {shape}") nlay, nrow, ncol = shape u2ds = [] for k in range(nlay): @@ -1172,9 +1152,7 @@ def build_transient_sequence(self): f"Transient3d error: can't cast key: {key} to kper integer" ) if key < 0: - raise Exception( - f"Transient3d error: key can't be negative: {key}" - ) + raise Exception(f"Transient3d error: key can't be negative: {key}") try: u3d = self.__get_3d_instance(key, val) except Exception as e: @@ -1435,9 +1413,7 @@ def __setattr__(self, key, value): elif hasattr(self, "transient_2ds") and key == "fmtin": # set fmtin for each u2d for kper, u2d in self.transient_2ds.items(): - self.transient_2ds[kper].format = ArrayFormat( - u2d, fortran=value - ) + self.transient_2ds[kper].format = ArrayFormat(u2d, fortran=value) elif hasattr(self, "transient_2ds") and key == "how": # set how for each u2d for kper, u2d in self.transient_2ds.items(): @@ -1620,9 +1596,7 @@ def build_transient_sequence(self): f"Transient2d error: can't cast key: {key} to kper integer" ) if key < 0: - raise Exception( - f"Transient2d error: key can't be negative: {key}" - ) + raise Exception(f"Transient2d error: key can't be negative: {key}") try: u2d = self.__get_2d_instance(key, val) except Exception as e: @@ -1847,8 +1821,7 @@ def __init__( self._model = model if len(shape) not in (1, 2): raise ValueError( - "Util2d: shape must describe 1- or 2-dimensions, " - "e.g. (nrow, ncol)" + "Util2d: shape must describe 1- or 2-dimensions, e.g. 
(nrow, ncol)" ) if min(shape) < 1: raise ValueError("Util2d: each shape dimension must be at least 1") @@ -2141,9 +2114,7 @@ def python_file_path(self): if self._model.model_ws != ".": python_file_path = os.path.join(self._model.model_ws) if self._model.external_path is not None: - python_file_path = os.path.join( - python_file_path, self._model.external_path - ) + python_file_path = os.path.join(python_file_path, self._model.external_path) python_file_path = os.path.join(python_file_path, self.filename) return python_file_path @@ -2171,9 +2142,7 @@ def model_file_path(self): model_file_path = "" if self._model.external_path is not None: - model_file_path = os.path.join( - model_file_path, self._model.external_path - ) + model_file_path = os.path.join(model_file_path, self._model.external_path) model_file_path = os.path.join(model_file_path, self.filename) return model_file_path @@ -2197,8 +2166,7 @@ def _get_fixed_cr(self, locat, value=None): if self.format.binary: if locat is None: raise Exception( - "Util2d._get_fixed_cr(): locat is None but " - "format is binary" + "Util2d._get_fixed_cr(): locat is None but format is binary" ) if not self.format.array_free_format: locat = -1 * np.abs(locat) @@ -2249,9 +2217,7 @@ def get_openclose_cr(self): def get_external_cr(self): locat = self._model.next_ext_unit() - self._model.add_external( - self.model_file_path, locat, self.format.binary - ) + self._model.add_external(self.model_file_path, locat, self.format.binary) if self.format.array_free_format: cr = "EXTERNAL {:>30d} {:15} {:>10s} {:2.0f} {:<30s}\n".format( locat, @@ -2366,9 +2332,7 @@ def get_file_entry(self, how=None): return self.get_constant_cr(value) else: - raise Exception( - f"Util2d.get_file_entry() error: unrecognized 'how':{how}" - ) + raise Exception(f"Util2d.get_file_entry() error: unrecognized 'how':{how}") @property def string(self): @@ -2570,9 +2534,7 @@ def load_txt(shape, file_in, dtype, fmtin): return data.reshape(shape) @staticmethod - def write_txt( - shape, file_out, data, fortran_format="(FREE)", python_format=None - ): + def write_txt(shape, file_out, data, fortran_format="(FREE)", python_format=None): if fortran_format.upper() == "(FREE)" and python_format is None: np.savetxt( file_out, @@ -2734,9 +2696,7 @@ def parse_value(self, value): f'Util2d:could not cast boolean value to type "bool": {value}' ) else: - raise Exception( - "Util2d:value type is bool, but dtype not set as bool" - ) + raise Exception("Util2d:value type is bool, but dtype not set as bool") elif isinstance(value, (str, os.PathLike)): if os.path.exists(value): self.__value = str(value) @@ -2790,9 +2750,7 @@ def parse_value(self, value): self.__value = value else: - raise Exception( - f"Util2d:unsupported type in util_array: {type(value)}" - ) + raise Exception(f"Util2d:unsupported type in util_array: {type(value)}") @classmethod def load( @@ -2866,9 +2824,7 @@ def load( ) else: f = open(fname, "rb") - header_data, data = Util2d.load_bin( - shape, f, dtype, bintype="Head" - ) + header_data, data = Util2d.load_bin(shape, f, dtype, bintype="Head") f.close() u2d = cls( model, @@ -2990,9 +2946,7 @@ def parse_control_record( try: fname = ext_unit_dict[nunit].filename.strip() except: - print( - f" could not determine filename for unit {raw[1]}" - ) + print(f" could not determine filename for unit {raw[1]}") if isfloat: cnstnt = float(raw[2].lower().replace("d", "e")) @@ -3017,9 +2971,7 @@ def parse_control_record( locat = int(line[0:10].strip()) if isfloat: if len(line) >= 20: - cnstnt = float( - 
line[10:20].strip().lower().replace("d", "e") - ) + cnstnt = float(line[10:20].strip().lower().replace("d", "e")) else: cnstnt = 0.0 else: @@ -3059,9 +3011,7 @@ def parse_control_record( freefmt = "block" nunit = current_unit elif locat == 102: - raise NotImplementedError( - "MT3D zonal format not supported..." - ) + raise NotImplementedError("MT3D zonal format not supported...") elif locat == 103: freefmt = "internal" nunit = current_unit diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 7cc6690de5..97ce673f45 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -194,16 +194,12 @@ def drop(self, fields): if not isinstance(fields, list): fields = [fields] names = [n for n in self.dtype.names if n not in fields] - dtype = np.dtype( - [(k, d) for k, d in self.dtype.descr if k not in fields] - ) + dtype = np.dtype([(k, d) for k, d in self.dtype.descr if k not in fields]) spd = {} for k, v in self.data.items(): # because np 1.9 doesn't support indexing by list of columns newarr = np.array([self.data[k][n] for n in names]).transpose() - newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view( - np.recarray - ) + newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(np.recarray) for n in dtype.names: newarr[n] = self.data[k][n] spd[k] = newarr @@ -315,9 +311,7 @@ def __cast_data(self, data): try: data = np.array(data) except Exception as e: - raise ValueError( - f"MfList error: casting list to ndarray: {e!s}" - ) + raise ValueError(f"MfList error: casting list to ndarray: {e!s}") # If data is a dict, the we have to assume it is keyed on kper if isinstance(data, dict): @@ -336,9 +330,7 @@ def __cast_data(self, data): try: d = np.array(d) except Exception as e: - raise ValueError( - f"MfList error: casting list to ndarray: {e}" - ) + raise ValueError(f"MfList error: casting list to ndarray: {e}") if isinstance(d, np.recarray): self.__cast_recarray(kper, d) @@ -375,9 +367,7 @@ def __cast_data(self, data): elif isinstance(data, str): self.__cast_str(0, data) else: - raise ValueError( - f"MfList error: unsupported data type: {type(data)}" - ) + raise ValueError(f"MfList error: unsupported data type: {type(data)}") def __cast_str(self, kper, d): # If d is a string, assume it is a filename and check that it exists @@ -419,19 +409,13 @@ def __cast_ndarray(self, kper, d): f"dtype len: {len(self.dtype)}" ) try: - self.__data[kper] = np.rec.fromarrays( - d.transpose(), dtype=self.dtype - ) + self.__data[kper] = np.rec.fromarrays(d.transpose(), dtype=self.dtype) except Exception as e: - raise ValueError( - f"MfList error: casting ndarray to recarray: {e!s}" - ) + raise ValueError(f"MfList error: casting ndarray to recarray: {e!s}") self.__vtype[kper] = np.recarray def __cast_dataframe(self, kper, d): - self.__cast_recarray( - kper, d.to_records(index=False).astype(self.dtype) - ) + self.__cast_recarray(kper, d.to_records(index=False).astype(self.dtype)) def get_dataframe(self, squeeze=False): """ @@ -494,9 +478,7 @@ def get_dataframe(self, squeeze=False): # squeeze: remove duplicate periods if squeeze: - changed = ( - df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1) - ) + changed = df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1) changed = changed.groupby("per").transform(lambda s: s.any()) df = df.loc[changed, :] @@ -536,9 +518,7 @@ def add_record(self, kper, index, values): self.__vtype[kper] = np.recarray elif self.vtype[kper] == np.recarray: # Extend the recarray - self.__data[kper] = np.append( - self.__data[kper], 
diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py
index 7cc6690de5..97ce673f45 100644
--- a/flopy/utils/util_list.py
+++ b/flopy/utils/util_list.py
@@ -194,16 +194,12 @@ def drop(self, fields):
         if not isinstance(fields, list):
             fields = [fields]
         names = [n for n in self.dtype.names if n not in fields]
-        dtype = np.dtype(
-            [(k, d) for k, d in self.dtype.descr if k not in fields]
-        )
+        dtype = np.dtype([(k, d) for k, d in self.dtype.descr if k not in fields])
         spd = {}
         for k, v in self.data.items():
             # because np 1.9 doesn't support indexing by list of columns
             newarr = np.array([self.data[k][n] for n in names]).transpose()
-            newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
-                np.recarray
-            )
+            newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(np.recarray)
             for n in dtype.names:
                 newarr[n] = self.data[k][n]
             spd[k] = newarr
@@ -315,9 +311,7 @@ def __cast_data(self, data):
            try:
                data = np.array(data)
            except Exception as e:
-                raise ValueError(
-                    f"MfList error: casting list to ndarray: {e!s}"
-                )
+                raise ValueError(f"MfList error: casting list to ndarray: {e!s}")

        # If data is a dict, the we have to assume it is keyed on kper
        if isinstance(data, dict):
@@ -336,9 +330,7 @@ def __cast_data(self, data):
                    try:
                        d = np.array(d)
                    except Exception as e:
-                        raise ValueError(
-                            f"MfList error: casting list to ndarray: {e}"
-                        )
+                        raise ValueError(f"MfList error: casting list to ndarray: {e}")

                if isinstance(d, np.recarray):
                    self.__cast_recarray(kper, d)
@@ -375,9 +367,7 @@ def __cast_data(self, data):
        elif isinstance(data, str):
            self.__cast_str(0, data)
        else:
-            raise ValueError(
-                f"MfList error: unsupported data type: {type(data)}"
-            )
+            raise ValueError(f"MfList error: unsupported data type: {type(data)}")

    def __cast_str(self, kper, d):
        # If d is a string, assume it is a filename and check that it exists
@@ -419,19 +409,13 @@ def __cast_ndarray(self, kper, d):
            f"dtype len: {len(self.dtype)}"
        )
        try:
-            self.__data[kper] = np.rec.fromarrays(
-                d.transpose(), dtype=self.dtype
-            )
+            self.__data[kper] = np.rec.fromarrays(d.transpose(), dtype=self.dtype)
        except Exception as e:
-            raise ValueError(
-                f"MfList error: casting ndarray to recarray: {e!s}"
-            )
+            raise ValueError(f"MfList error: casting ndarray to recarray: {e!s}")
        self.__vtype[kper] = np.recarray

    def __cast_dataframe(self, kper, d):
-        self.__cast_recarray(
-            kper, d.to_records(index=False).astype(self.dtype)
-        )
+        self.__cast_recarray(kper, d.to_records(index=False).astype(self.dtype))

    def get_dataframe(self, squeeze=False):
        """
@@ -494,9 +478,7 @@ def get_dataframe(self, squeeze=False):

        # squeeze: remove duplicate periods
        if squeeze:
-            changed = (
-                df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1)
-            )
+            changed = df.groupby(["k", "i", "j", "no"]).diff().ne(0.0).any(axis=1)
            changed = changed.groupby("per").transform(lambda s: s.any())
            df = df.loc[changed, :]
@@ -536,9 +518,7 @@ def add_record(self, kper, index, values):
                self.__vtype[kper] = np.recarray
            elif self.vtype[kper] == np.recarray:
                # Extend the recarray
-                self.__data[kper] = np.append(
-                    self.__data[kper], self.get_empty(1)
-                )
+                self.__data[kper] = np.append(self.__data[kper], self.get_empty(1))
            else:
                self.__data[kper] = self.get_empty(1)
                self.__vtype[kper] = np.recarray
@@ -588,9 +568,7 @@ def __setitem__(self, kper, data):
            try:
                data = np.array(data)
            except Exception as e:
-                raise ValueError(
-                    f"MfList error: casting list to ndarray: {e!s}"
-                )
+                raise ValueError(f"MfList error: casting list to ndarray: {e!s}")
        # cast data
        if isinstance(data, int):
            self.__cast_int(kper, data)
@@ -603,9 +581,7 @@ def __setitem__(self, kper, data):
        elif isinstance(data, str):
            self.__cast_str(kper, data)
        else:
-            raise ValueError(
-                f"MfList error: unsupported data type: {type(data)}"
-            )
+            raise ValueError(f"MfList error: unsupported data type: {type(data)}")

    def __fromfile(self, f):
        try:
@@ -629,10 +605,7 @@ def get_filenames(self):
            elif kper in kpers:
                kper_vtype = self.__vtype[kper]

-            if (
-                self._model.array_free_format
-                and self._model.external_path is not None
-            ):
+            if self._model.array_free_format and self._model.external_path is not None:
                filename = f"{self.package.name[0]}_{kper:04d}.dat"
                filenames.append(filename)
        return filenames
@@ -664,14 +637,8 @@ def write_transient(
        ), "MfList.write() error: f argument must be a file handle"
        kpers = list(self.data.keys())
        pak_name_str = self.package.__class__.__name__.lower()
-        if (len(kpers) == 0) and (
-            pak_name_str == "mfusgwel"
-        ):  # must be cln wels
-            kpers += [
-                kper
-                for kper in list(cln_data.data.keys())
-                if kper not in kpers
-            ]
+        if (len(kpers) == 0) and (pak_name_str == "mfusgwel"):  # must be cln wels
+            kpers += [kper for kper in list(cln_data.data.keys()) if kper not in kpers]
        kpers.sort()
        first = kpers[0]
        if single_per is None:
@@ -764,9 +731,7 @@ def write_transient(

        if cln_data is not None:
            if cln_data.get_itmp(kper) is not None:
-                cln_data.write_transient(
-                    f, single_per=kper, write_header=False
-                )
+                cln_data.write_transient(f, single_per=kper, write_header=False)

    def __tofile(self, f, data):
        # Write the recarray (data) to the file (or file handle) f
@@ -801,9 +766,7 @@ def check_kij(self):
            return
        nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
        if nl == 0:
-            warnings.warn(
-                "MfList.check_kij(): unable to get dis info from model"
-            )
+            warnings.warn("MfList.check_kij(): unable to get dis info from model")
            return
        for kper in list(self.data.keys()):
            out_idx = []
@@ -887,9 +850,7 @@ def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
                kper_data = self.__data[kper]
                if idx_val is not None:
                    kper_data = kper_data[
-                        np.asarray(
-                            kper_data[idx_val[0]] == idx_val[1]
-                        ).nonzero()
+                        np.asarray(kper_data[idx_val[0]] == idx_val[1]).nonzero()
                    ]
                v = function(kper_data[attr])
                values.append(v)
@@ -1078,9 +1039,7 @@ def to_array(self, kper=0, mask=False):

        for name, arr in arrays.items():
            if unstructured:
-                cnt = np.zeros(
-                    (self._model.nlay * self._model.ncpl,), dtype=float
-                )
+                cnt = np.zeros((self._model.nlay * self._model.ncpl,), dtype=float)
            else:
                cnt = np.zeros(
                    (self._model.nlay, self._model.nrow, self._model.ncol),
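The get_dataframe(squeeze=True) hunk is the one piece of nontrivial logic touched in this file: the first groupby(...).diff() flags cells whose value changed since the previous stress period, and the transform over "per" keeps every row of any period containing at least one change. A toy reproduction under simplified assumptions (the real MfList frame also carries a "no" counter for duplicate cells):

    import pandas as pd

    # Three periods for two cells; period 1 repeats period 0 and is dropped.
    df = pd.DataFrame(
        {
            "per": [0, 0, 1, 1, 2, 2],
            "k": 0,
            "i": [1, 2, 1, 2, 1, 2],
            "j": 1,
            "flux": [-1.0, -2.0, -1.0, -2.0, -1.5, -2.0],
        }
    ).set_index(["per", "k", "i", "j"])

    changed = df.groupby(["k", "i", "j"]).diff().ne(0.0).any(axis=1)
    changed = changed.groupby("per").transform(lambda s: s.any())
    print(df.loc[changed, :])  # only periods 0 and 2 survive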
diff --git a/flopy/utils/utils_def.py b/flopy/utils/utils_def.py
index 4212223947..38ef48d94b 100644
--- a/flopy/utils/utils_def.py
+++ b/flopy/utils/utils_def.py
@@ -166,9 +166,7 @@ def get_util2d_shape_for_layer(model, layer=0):
     return (nrow, ncol)


-def get_unitnumber_from_ext_unit_dict(
-    model, pak_class, ext_unit_dict=None, ipakcb=0
-):
+def get_unitnumber_from_ext_unit_dict(model, pak_class, ext_unit_dict=None, ipakcb=0):
    """
    For a given modflow package, defines input file unit number, plus package
    input and (optionally) output (budget) save file names.
@@ -198,9 +196,7 @@ def get_unitnumber_from_ext_unit_dict(
            ext_unit_dict, filetype=pak_class._ftype()
        )
        if ipakcb > 0:
-            _, filenames[1] = model.get_ext_dict_attr(
-                ext_unit_dict, unit=ipakcb
-            )
+            _, filenames[1] = model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
            model.add_pop_key_list(ipakcb)

    return unitnumber, filenames
@@ -233,9 +229,7 @@ def type_from_iterable(_iter, index=0, _type=int, default_val=0):

def get_open_file_object(fname_or_fobj, read_write="rw"):
    """Returns an open file object for either a file name or open file object."""
-    openfile = not (
-        hasattr(fname_or_fobj, "read") or hasattr(fname_or_fobj, "write")
-    )
+    openfile = not (hasattr(fname_or_fobj, "read") or hasattr(fname_or_fobj, "write"))
    if openfile:
        filename = fname_or_fobj
        f_obj = open(filename, read_write)
diff --git a/flopy/utils/utl_import.py b/flopy/utils/utl_import.py
index a7943e6b50..46b24fae53 100644
--- a/flopy/utils/utl_import.py
+++ b/flopy/utils/utl_import.py
@@ -140,9 +140,7 @@ def import_optional_dependency(
            module_to_get = sys.modules[install_name]
        else:
            module_to_get = module
-    minimum_version = (
-        min_version if min_version is not None else VERSIONS.get(parent)
-    )
+    minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
    if minimum_version:
        version = get_version(module_to_get)
        if Version(version) < Version(minimum_version):
diff --git a/flopy/utils/voronoi.py b/flopy/utils/voronoi.py
index db3d5fd26e..22319e6b44 100644
--- a/flopy/utils/voronoi.py
+++ b/flopy/utils/voronoi.py
@@ -281,9 +281,7 @@ def __init__(self, tri, **kwargs):
        if isinstance(tri, Triangle):
            verts, iverts, points = tri2vor(tri, **kwargs)
        else:
-            raise TypeError(
-                "The tri argument must be of type flopy.utils.Triangle"
-            )
+            raise TypeError("The tri argument must be of type flopy.utils.Triangle")
        self.points = points
        self.verts = verts
        self.iverts = iverts
@@ -303,9 +301,7 @@ def get_disv_gridprops(self):
            flopy.mf6.ModflowGwfdisv constructor

        """
-        disv_gridprops = get_disv_gridprops(
-            self.verts, self.iverts, xcyc=self.points
-        )
+        disv_gridprops = get_disv_gridprops(self.verts, self.iverts, xcyc=self.points)
        return disv_gridprops

    def get_disu5_gridprops(self):
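get_open_file_object's reflowed test is a handy duck-typing idiom: anything exposing read() or write() is treated as an already-open handle and passed through, while plain paths are opened. A self-contained sketch of the pattern (simplified here to a read-only default mode):

    import io

    def get_open_file_object(fname_or_fobj, read_write="r"):
        # Objects with read()/write() are file-like; strings/paths are not.
        openfile = not (hasattr(fname_or_fobj, "read") or hasattr(fname_or_fobj, "write"))
        if openfile:
            return open(fname_or_fobj, read_write)
        return fname_or_fobj

    buf = io.StringIO("already open")
    assert get_open_file_object(buf) is buf  # passed through unchanged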
diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py
index 2d34934ac3..d5da9ccf40 100644
--- a/flopy/utils/zonbud.py
+++ b/flopy/utils/zonbud.py
@@ -61,17 +61,13 @@ def __init__(
        if isinstance(cbc_file, CellBudgetFile):
            self.cbc = cbc_file
-        elif isinstance(cbc_file, (str, os.PathLike)) and os.path.isfile(
-            cbc_file
-        ):
+        elif isinstance(cbc_file, (str, os.PathLike)) and os.path.isfile(cbc_file):
            self.cbc = CellBudgetFile(cbc_file)
        else:
            raise Exception(f"Cannot load cell budget file: {cbc_file}.")

        if isinstance(z, np.ndarray):
-            assert np.issubdtype(
-                z.dtype, np.integer
-            ), "Zones dtype must be integer"
+            assert np.issubdtype(z.dtype, np.integer), "Zones dtype must be integer"
        else:
            e = (
                "Please pass zones as a numpy ndarray of (positive)"
@@ -81,9 +77,7 @@ def __init__(

        # Check for negative zone values
        if np.any(z < 0):
-            raise Exception(
-                "Negative zone value(s) found:", np.unique(z[z < 0])
-            )
+            raise Exception("Negative zone value(s) found:", np.unique(z[z < 0]))

        self.dis = None
        if "model" in kwargs.keys():
@@ -130,9 +124,7 @@ def __init__(
            # Check dimensions of input zone array
            s = (
                "Row/col dimensions of zone array {}"
-                " do not match model row/col dimensions {}".format(
-                    z.shape, self.cbc_shape
-                )
+                " do not match model row/col dimensions {}".format(z.shape, self.cbc_shape)
            )
            assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s
@@ -163,9 +155,7 @@ def __init__(
        for z, a in iter(aliases.items()):
            if z != 0 and z in self._zonenamedict.keys():
                if z in seen:
-                    raise Exception(
-                        "Zones may not have more than 1 alias."
-                    )
+                    raise Exception("Zones may not have more than 1 alias.")
                self._zonenamedict[z] = "_".join(a.split())
                seen.append(z)
@@ -177,9 +167,7 @@ def __init__(
        # Get imeth for each record in the CellBudgetFile record list
        self.imeth = {}
        for record in self.cbc.recordarray:
-            self.imeth[record["text"].strip().decode("utf-8")] = record[
-                "imeth"
-            ]
+            self.imeth[record["text"].strip().decode("utf-8")] = record["imeth"]

        # INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES.
        # CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS
@@ -225,9 +213,7 @@ def __init__(
            if verbose:
                s = (
                    "Computing the budget for"
-                    " time step {} in stress period {}".format(
-                        kk[0] + 1, kk[1] + 1
-                    )
+                    " time step {} in stress period {}".format(kk[0] + 1, kk[1] + 1)
                )
                print(s)
            self._compute_budget(kstpkper=kk)
@@ -304,9 +290,7 @@ def _compute_budget(self, kstpkper=None, totim=None):

        return

-    def _add_empty_record(
-        self, recordarray, recname, kstpkper=None, totim=None
-    ):
+    def _add_empty_record(self, recordarray, recname, kstpkper=None, totim=None):
        """
        Build an empty records based on the specified flow direction and
        record name for the given list of zones.
@@ -366,9 +350,7 @@ def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
                ("stress_period", "<i4"),
[...]
        try:
            if self.ncol >= 2:
-                data = self.cbc.get_data(
-                    text=recname, kstpkper=kstpkper, totim=totim
-                )[0]
+                data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+                    0
+                ]

                # "FLOW RIGHT FACE"  COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS.
                # COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM
@@ -617,9 +586,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # Get indices with negative flow face values (into higher zone)
                # Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -629,9 +596,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # FLOW BETWEEN NODE J,I,K AND J+1,I,K
                k, i, j = np.asarray(
@@ -656,9 +621,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # Get indices with negative flow face values (into higher zone)
                # Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -668,9 +631,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
                k, i, j = np.asarray(ich == 1).nonzero()
@@ -685,9 +646,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
                    ).nonzero()
                    fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi[tzi != 0]]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                k, i, j = np.asarray(ich == 1).nonzero()
                k, i, j = (
@@ -715,9 +672,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
@@ -725,9 +680,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

        except Exception as e:
            print(e)
@@ -749,9 +702,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
        """
        try:
            if self.nrow >= 2:
-                data = self.cbc.get_data(
-                    text=recname, kstpkper=kstpkper, totim=totim
-                )[0]
+                data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+                    0
+                ]

                # "FLOW FRONT FACE"
                # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
@@ -767,17 +720,13 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                idx = np.asarray(
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
                k, i, j = np.asarray(
@@ -791,17 +740,13 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                idx = np.asarray(
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
                k, i, j = np.asarray(ich == 1).nonzero()
@@ -816,9 +761,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
@@ -826,9 +769,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                k, i, j = np.asarray(ich == 1).nonzero()
                k, i, j = (
@@ -846,9 +787,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
@@ -856,9 +795,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

        except Exception as e:
            print(e)
@@ -881,9 +818,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
        """
        try:
            if self.nlay >= 2:
-                data = self.cbc.get_data(
-                    text=recname, kstpkper=kstpkper, totim=totim
-                )[0]
+                data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)[
+                    0
+                ]

                # "FLOW LOWER FACE"
                # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
@@ -899,17 +836,13 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                idx = np.asarray(
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
                k, i, j = np.asarray(
@@ -923,17 +856,13 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                idx = np.asarray(
                    (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
                ).nonzero()
                fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
-                self._update_budget_fromfaceflow(
-                    fzi, tzi, np.abs(fi), kstpkper, totim
-                )
+                self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)

                # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
                k, i, j = np.asarray(ich == 1).nonzero()
@@ -948,9 +877,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
@@ -958,9 +885,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                k, i, j = np.asarray(ich == 1).nonzero()
                k, i, j = (
@@ -978,9 +903,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
                    fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

                    idx = np.asarray(
                        (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
@@ -988,9 +911,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
                    fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
                    fz = ["TO_CONSTANT_HEAD"] * len(fzi)
                    tz = [self._zonenamedict[z] for z in tzi]
-                    self._update_budget_fromssst(
-                        fz, tz, np.abs(f), kstpkper, totim
-                    )
+                    self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

        except Exception as e:
            print(e)
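All three accumulators above (_accumulate_flow_frf/fff/flf) funnel face flows through sum_flux_tuples before posting them to the budget. Its body is not shown in this patch, so the sketch below is an illustrative reimplementation inferred from the call sites (from-zone array, to-zone array, and flux array in; unique zone pairs with summed fluxes out), not flopy's actual helper:

    import numpy as np

    def sum_flux_tuples(fz, tz, q):
        # Group fluxes by unique (from_zone, to_zone) pair and sum each group.
        pairs = np.stack([fz, tz], axis=1)
        uniq, inv = np.unique(pairs, axis=0, return_inverse=True)
        sums = np.zeros(len(uniq))
        np.add.at(sums, inv.ravel(), q)  # ravel guards against 2-D inverse shapes
        return uniq[:, 0], uniq[:, 1], sums

    fz = np.array([1, 1, 1])  # flow out of zone 1 ...
    tz = np.array([2, 2, 3])  # ... into zones 2, 2, and 3
    q = np.array([0.5, 0.25, 1.0])
    print(sum_flux_tuples(fz, tz, q))  # zone 1 -> 2: 0.75, zone 1 -> 3: 1.0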
@@ -1013,12 +934,8 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim):

        if imeth == 2 or imeth == 5:
            # LIST
-            qin = np.ma.zeros(
-                (self.nlay * self.nrow * self.ncol), self.float_type
-            )
-            qout = np.ma.zeros(
-                (self.nlay * self.nrow * self.ncol), self.float_type
-            )
+            qin = np.ma.zeros((self.nlay * self.nrow * self.ncol), self.float_type)
+            qout = np.ma.zeros((self.nlay * self.nrow * self.ncol), self.float_type)
            for [node, q] in zip(data["node"], data["q"]):
                idx = node - 1
                if q > 0:
@@ -1053,9 +970,7 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim):
                        qout[0, r, c] = data[r, c]
        else:
            # Should not happen
-            raise Exception(
-                f'Unrecognized "imeth" for {recname} record: {imeth}'
-            )
+            raise Exception(f'Unrecognized "imeth" for {recname} record: {imeth}')

        # Inflows
        fz = []
@@ -1111,13 +1026,9 @@ def _compute_mass_balance(self, kstpkper, totim):
            (self._budget["totim"] == totim)
            & np.in1d(self._budget["name"], innames)
        ).nonzero()
-        a = _numpyvoid2numeric(
-            self._budget[list(self._zonenamedict.values())][rowidx]
-        )
+        a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
        intot = np.array(a.sum(axis=0))
-        tz = np.array(
-            list([n for n in self._budget.dtype.names if n not in skipcols])
-        )
+        tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
        fz = np.array(["TOTAL_IN"] * len(tz))
        self._update_budget_fromssst(fz, tz, intot, kstpkper, totim)
@@ -1133,28 +1044,20 @@ def _compute_mass_balance(self, kstpkper, totim):
            (self._budget["totim"] == totim)
            & np.in1d(self._budget["name"], outnames)
        ).nonzero()
-        a = _numpyvoid2numeric(
-            self._budget[list(self._zonenamedict.values())][rowidx]
-        )
+        a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
        outot = np.array(a.sum(axis=0))
-        tz = np.array(
-            list([n for n in self._budget.dtype.names if n not in skipcols])
-        )
+        tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
        fz = np.array(["TOTAL_OUT"] * len(tz))
        self._update_budget_fromssst(fz, tz, outot, kstpkper, totim)

        # Compute IN-OUT
-        tz = np.array(
-            list([n for n in self._budget.dtype.names if n not in skipcols])
-        )
+        tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
        f = intot - outot
        fz = np.array(["IN-OUT"] * len(tz))
        self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)

        # Compute percent discrepancy
-        tz = np.array(
-            list([n for n in self._budget.dtype.names if n not in skipcols])
-        )
+        tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
        fz = np.array(["PERCENT_DISCREPANCY"] * len(tz))
        in_minus_out = intot - outot
        in_plus_out = intot + outot
@@ -1233,9 +1136,7 @@ def get_budget(self, names=None, zones=None, net=False, pivot=False):

        return recarray

-    def get_volumetric_budget(
-        self, modeltime, recarray=None, extrapolate_kper=False
-    ):
+    def get_volumetric_budget(self, modeltime, recarray=None, extrapolate_kper=False):
        """
        Method to generate a volumetric budget table based on
        flux information
@@ -1288,10 +1189,7 @@ def to_csv(self, fname):
            f.write(",".join(self._budget.dtype.names) + "\n")
            # Write rows
            for rowidx in range(self._budget.shape[0]):
-                s = (
-                    ",".join([str(i) for i in list(self._budget[:][rowidx])])
-                    + "\n"
-                )
+                s = ",".join([str(i) for i in list(self._budget[:][rowidx])]) + "\n"
                f.write(s)
        return
@@ -1632,10 +1530,7 @@ def write_zone_file(cls, fname, array, fmtin=None, iprn=None):
                        end = start + fmtin
                        vals = rowvals[start:end]
                        while len(vals) > 0:
-                            s = (
-                                "".join([formatter(int(val)) for val in vals])
-                                + "\n"
-                            )
+                            s = "".join([formatter(int(val)) for val in vals]) + "\n"
                            f.write(s)
                            start = end
                            end = start + fmtin
@@ -1644,10 +1539,7 @@ def write_zone_file(cls, fname, array, fmtin=None, iprn=None):
                elif fmtin == ncol:
                    for row in range(nrow):
                        vals = array[lay, row, :].ravel()
-                        f.write(
-                            "".join([formatter(int(val)) for val in vals])
-                            + "\n"
-                        )
+                        f.write("".join([formatter(int(val)) for val in vals]) + "\n")

    def copy(self):
        """
@@ -1679,8 +1571,7 @@ def export(self, f, ml, **kwargs):
        if isinstance(f, str):
            if not f.endswith(".nc"):
                raise AssertionError(
-                    "File extension must end with .nc to "
-                    "export a netcdf file"
+                    "File extension must end with .nc to export a netcdf file"
                )

        zbncfobj = dataframe_to_netcdf_fmt(
@@ -1834,9 +1725,7 @@ def run_model(self, exe_name=None, nam_file=None, silent=False):
            exe_name = self._exe_name
        if nam_file is None:
            nam_file = os.path.join(self._name + self._extension)
-        return run_model(
-            exe_name, nam_file, model_ws=self._model_ws, silent=silent
-        )
+        return run_model(exe_name, nam_file, model_ws=self._model_ws, silent=silent)

    def __setattr__(self, key, value):
        if key in ("zon", "bud", "grb", "cbc"):
@@ -1870,9 +1759,7 @@ def add_package(self, pkg_name, pkg):
            if pkg_name == "cbc":
                pkg_name = "bud"
            else:
-                raise KeyError(
-                    f"{pkg_name} package is not valid for zonebudget"
-                )
+                raise KeyError(f"{pkg_name} package is not valid for zonebudget")

        if isinstance(pkg, str):
            if os.path.exists(os.path.join(self._model_ws, pkg)):
@@ -1965,9 +1852,7 @@ def get_dataframes(
        >>> df = zb6.get_dataframes()

        """
-        recarray = self.get_budget(
-            names=names, zones=zones, net=net, pivot=pivot
-        )
+        recarray = self.get_budget(names=names, zones=zones, net=net, pivot=pivot)

        return _recarray_to_dataframe(
            recarray,
@@ -1979,9 +1864,7 @@ def get_dataframes(
            pivot=pivot,
        )

-    def get_budget(
-        self, f=None, names=None, zones=None, net=False, pivot=False
-    ):
+    def get_budget(self, f=None, names=None, zones=None, net=False, pivot=False):
        """
        Method to read and get zonebudget output
@@ -2009,15 +1892,11 @@ def get_budget(
        if f is None and self._recarray is None:
            f = os.path.join(self._model_ws, f"{self._name}.csv")
-            self._recarray = _read_zb_csv2(
-                f, add_prefix=False, aliases=aliases
-            )
+            self._recarray = _read_zb_csv2(f, add_prefix=False, aliases=aliases)
        elif f is None:
            pass
        else:
-            self._recarray = _read_zb_csv2(
-                f, add_prefix=False, aliases=aliases
-            )
+            self._recarray = _read_zb_csv2(f, add_prefix=False, aliases=aliases)

        recarray = _get_budget(
            self._recarray,
@@ -2032,9 +1911,7 @@ def get_budget(

        return recarray

-    def get_volumetric_budget(
-        self, modeltime, recarray=None, extrapolate_kper=False
-    ):
+    def get_volumetric_budget(self, modeltime, recarray=None, extrapolate_kper=False):
        """
        Method to generate a volumetric budget table based on
        flux information
@@ -2147,8 +2024,7 @@ def export(self, f, ml, **kwargs):
            f = str(f)
            if not f.endswith(".nc"):
                raise AssertionError(
-                    "File extension must end with .nc to "
-                    "export a netcdf file"
+                    "File extension must end with .nc to export a netcdf file"
                )

        zbncfobj = dataframe_to_netcdf_fmt(
@@ -2229,8 +2105,7 @@ def write_input(self, f=None, line_length=20):
        with open(f, "w") as foo:
            bfmt = [" {:d}"]
            foo.write(
-                f"BEGIN DIMENSIONS\n  NCELLS {self.ncells}\n"
-                "END DIMENSIONS\n\n"
+                f"BEGIN DIMENSIONS\n  NCELLS {self.ncells}\nEND DIMENSIONS\n\n"
            )

            foo.write("BEGIN GRIDDATA\n  IZONE\n")
@@ -2393,9 +2268,7 @@ def _recarray_to_dataframe(
    elif timeunit.upper() == "YEARS":
        timeunit = "Y"
-    errmsg = (
-        f"Specified time units ({timeunit}) not recognized. Please use one of "
-    )
+    errmsg = f"Specified time units ({timeunit}) not recognized. Please use one of "
    assert timeunit in valid_timeunit, errmsg + ", ".join(valid_timeunit) + "."

    df = pd.DataFrame().from_records(recarray)
@@ -2565,12 +2438,8 @@ def _compute_net_budget(recarray, zonenamedict):
    :return:
    """
    recnames = _get_record_names(recarray)
-    innames = [
-        n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")
-    ]
-    outnames = [
-        n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")
-    ]
+    innames = [n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")]
+    outnames = [n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")]
    select_fields = ["totim", "time_step", "stress_period", "name"] + list(
        zonenamedict.values()
    )
@@ -2944,8 +2813,7 @@ def _pivot_recarray(recarray):
    n = 0
    for kstp, kper in kstp_kper:
        idxs = np.asarray(
-            (recarray["time_step"] == kstp)
-            & (recarray["stress_period"] == kper)
+            (recarray["time_step"] == kstp) & (recarray["stress_period"] == kper)
        ).nonzero()
        if len(idxs) == 0:
            pass
diff --git a/pyproject.toml b/pyproject.toml
index a128be2ab4..db93cda2e8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -116,7 +116,7 @@ include = ["flopy", "flopy.*"]
 "flopy.plot" = ["mplstyle/*.mplstyle"]

 [tool.ruff]
-line-length = 79
+line-length = 88
 include = [
     "pyproject.toml",
     "flopy/**/*.py",
@@ -129,12 +129,18 @@ extend-include = [
     "examples/**/*.ipynb"
 ]

+[tool.ruff.format]
+exclude = [
+    "flopy/mf6/**/*.py",
+]
+
 [tool.ruff.lint]
 select = [
     "D409", # pydocstyle - section-underline-matches-section-length
     "E", # pycodestyle error
     "F", # Pyflakes
     "I001", # isort - unsorted-imports
+    # "ISC001", # implicitly concatenated string literals
 ]
 ignore = [
     "E402", # module level import not at top of file
@@ -150,3 +156,6 @@ ignore = [
     "F821", # undefined name TODO FIXME
     "F841", # local variable assigned but never used
 ]
+
+[tool.ruff.lint.per-file-ignores]
+"flopy/mf6/**/*.py" = ["ISC001"]
\ No newline at end of file
diff --git a/scripts/process_benchmarks.py b/scripts/process_benchmarks.py
index b934c5b336..990722fd9d 100644
--- a/scripts/process_benchmarks.py
+++ b/scripts/process_benchmarks.py
@@ -73,9 +73,7 @@ def matplotlib_plot(stats):
    # markers according to system
    systems = np.unique(benchmarks_df["system"])
    markers = dict(zip(systems, ["x", "o", "s"]))  # osx, linux, windows
-    benchmarks_df["marker"] = benchmarks_df["system"].apply(
-        lambda x: markers[x]
-    )
+    benchmarks_df["marker"] = benchmarks_df["system"].apply(lambda x: markers[x])

    for i, (stat_name, stat_group) in enumerate(stats):
        stat_df = pd.DataFrame(stat_group)
@@ -91,9 +89,7 @@ def matplotlib_plot(stats):
            for pi, python in enumerate(pythons):
                psub = ssub[ssub["python"] == python]
                color = colors[python]
-                ax.scatter(
-                    psub["time"], psub["value"], color=color, marker=marker
-                )
+                ax.scatter(psub["time"], psub["value"], color=color, marker=marker)
                ax.plot(
                    psub["time"],
                    psub["value"],
diff --git a/scripts/update_version.py b/scripts/update_version.py
index 619a9e133f..ba3e73ebed 100644
--- a/scripts/update_version.py
+++ b/scripts/update_version.py
@@ -226,7 +226,5 @@ def update_version(
    else:
        update_version(
            timestamp=datetime.now(),
-            version=(
-                Version(args.version) if args.version else _current_version
-            ),
+            version=(Version(args.version) if args.version else _current_version),
        )
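The pyproject.toml hunk is the heart of the patch: Ruff's line length rises from 79 to 88 characters, the generated flopy/mf6 modules are excluded from the formatter, and ISC001 is ignored there so implicitly concatenated strings in generated code are left alone. A hypothetical convenience script (not part of this change) for verifying a working tree against the new settings; "ruff format --check" reports files that would be reformatted without rewriting them:

    import subprocess
    import sys

    def main() -> int:
        # Exits nonzero when any file no longer fits the 88-column style
        # configured in pyproject.toml.
        result = subprocess.run(["ruff", "format", "--check", "."])
        return result.returncode

    if __name__ == "__main__":
        sys.exit(main())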