Commit e68287c

Use uid=20075619(cenv0899) gid=11125(cenv0899_) groups=11125(cenv0899_),10031(OUCE_Linux_mistral),10044(linux-users),11180(cenv1030_) rather than
thomas-fred committed Mar 27, 2024
1 parent cdda3dc commit e68287c
Showing 3 changed files with 15 additions and 15 deletions.
12 changes: 6 additions & 6 deletions src/open_gira/direct_damages.py
@@ -304,7 +304,7 @@ def direct_damage(
Args:
exposure: Table containing exposed assets, likely edges split on a
-raster grid (i.e. `edge_id` is not necessarily unique).
+raster grid (i.e. `id` is not necessarily unique).
damage_curves: Relationship between hazard intensity and damage
fraction, keyed by `asset_type`.
hazard_columns: Columns in `exposure` which denote hazard intensities.
@@ -313,7 +313,7 @@
Returns:
Direct damage fraction, rows are splits of edges.
-Direct damage cost, rows are edges and `edge_id` should now be unique.
+Direct damage cost, rows are edges and `id` should now be unique.
"""

##########################################################
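For context on the inputs this docstring describes, here is a minimal sketch (not the repository's code; the hazard column name, edge ids and the piecewise-linear damage curve are assumptions) of an exposure table whose split edges share an `id`, and of interpolating a damage fraction per split:

import numpy as np
import pandas as pd

# Exposed assets: edges split on a raster grid, so `id` repeats across rows
exposure = pd.DataFrame({
    "id": ["road_1", "road_1", "road_2"],   # split edges share an id
    "asset_type": ["road_paved"] * 3,
    "flood_depth": [0.0, 0.6, 1.2],         # hazard intensity column (assumed name)
})

# Damage curve for this asset_type: hazard intensity (m) -> damage fraction
curve_depth = np.array([0.0, 0.5, 1.0, 2.0])
curve_fraction = np.array([0.0, 0.2, 0.5, 1.0])

# Interpolate a damage fraction for each split geometry
exposure["damage_fraction"] = np.interp(exposure["flood_depth"], curve_depth, curve_fraction)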
@@ -382,11 +382,11 @@ def direct_damage(
# join the other fields with the direct damage estimates
logging.info("Unifying rasterised segments and summing damage costs")

-# grouping on edge_id, sum all direct damage estimates to give a total dollar cost per edge
+# grouping on id, sum all direct damage estimates to give a total dollar cost per edge
direct_damages = pd.concat(
-[direct_damages_only, damage_fraction["edge_id"]],
+[direct_damages_only, damage_fraction["id"]],
axis="columns"
-).set_index("edge_id")
+).set_index("id")
grouped_direct_damages = direct_damages.groupby(direct_damages.index).sum()

-return damage_fraction, grouped_direct_damages
+return damage_fraction, grouped_direct_damages
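A self-contained sketch of the grouping pattern used in the changed lines above, with toy data (the hazard column name "RP100" and the edge ids are illustrative, not taken from the repository):

import pandas as pd

# Per-split damage costs (one row per rasterised segment of an edge)
direct_damages_only = pd.DataFrame({"RP100": [1000.0, 500.0, 2000.0]})
damage_fraction = pd.DataFrame({"id": ["road_1", "road_1", "road_2"]})

# Attach the id column, index by it, then sum the splits to a total cost per edge
direct_damages = pd.concat(
    [direct_damages_only, damage_fraction["id"]],
    axis="columns"
).set_index("id")
grouped_direct_damages = direct_damages.groupby(direct_damages.index).sum()
print(grouped_direct_damages)  # one row per unique id: road_1 -> 1500.0, road_2 -> 2000.0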
8 changes: 4 additions & 4 deletions workflow/transport-flood/event_set_direct_damages.py
@@ -82,21 +82,21 @@

# lose columns like "cell_indicies" or rastered length measures that are specific to _rastered_ edges
non_hazard_output_columns = list(set(non_hazard_columns) & set(unsplit.columns))
-unsplit_subset = unsplit[non_hazard_output_columns].set_index("edge_id", drop=False)
+unsplit_subset = unsplit[non_hazard_output_columns].set_index("id", drop=False)

# rejoin direct damage cost estimates with geometry and metadata columns and write to disk
# join on 'right' / grouped_direct_damages index to only keep rows we have damages for
direct_damages = unsplit_subset.join(grouped_direct_damages, validate="one_to_one", how="right")
direct_damages["edge_id"] = direct_damages.index
direct_damages["id"] = direct_damages.index
# we may not have calculated damages for every possible asset_type
assert len(direct_damages) <= len(unsplit_subset)
assert "edge_id" in direct_damages.columns
assert "id" in direct_damages.columns

# damage_fraction is on the split geometries, will have more rows
assert len(damage_fraction) >= len(direct_damages)

for dataframe in (damage_fraction, direct_damages):
assert "edge_id" in dataframe
assert "id" in dataframe

logging.info(f"Writing out {damage_fraction.shape=} (per split geometry, event raster)")
damage_fraction.to_parquet(damage_fraction_path)
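A hedged sketch of the rejoin performed above, again with toy data rather than the repository's own tables; it illustrates why `drop=False`, `how="right"` and `validate="one_to_one"` matter:

import pandas as pd

# Unsplit edge metadata, indexed by id but keeping the column as well (drop=False)
unsplit_subset = pd.DataFrame(
    {"id": ["road_1", "road_2", "road_3"], "asset_type": ["road_paved"] * 3}
).set_index("id", drop=False)

# Per-edge damage totals; some edges may be missing (no damages for their asset_type)
grouped_direct_damages = pd.DataFrame(
    {"RP100": [1500.0, 2000.0]},
    index=pd.Index(["road_1", "road_2"], name="id"),
)

# Right join keeps only edges we have damages for; one_to_one catches duplicate ids
direct_damages = unsplit_subset.join(grouped_direct_damages, validate="one_to_one", how="right")
direct_damages["id"] = direct_damages.index
assert len(direct_damages) <= len(unsplit_subset)
assert "id" in direct_damages.columns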
10 changes: 5 additions & 5 deletions workflow/transport-flood/return_period_direct_damages.py
@@ -130,15 +130,15 @@

# lose columns like "cell_indicies" or rastered length measures that are specific to _rastered_ edges
non_hazard_output_columns = list(set(non_hazard_columns) & set(unsplit.columns))
-unsplit_subset = unsplit[non_hazard_output_columns].set_index("edge_id", drop=False)
+unsplit_subset = unsplit[non_hazard_output_columns].set_index("id", drop=False)

# rejoin direct damage cost estimates with geometry and metadata columns and write to disk
# join on 'right' / grouped_direct_damages index to only keep rows we have damages for
direct_damages = unsplit_subset.join(grouped_direct_damages, validate="one_to_one", how="right")
direct_damages["edge_id"] = direct_damages.index
direct_damages["id"] = direct_damages.index
# we may not have calculated damages for every possible asset_type
assert len(direct_damages) <= len(unsplit_subset)
assert "edge_id" in direct_damages.columns
assert "id" in direct_damages.columns

expected_annual_damages_only = pd.DataFrame(data=expected_annual_damages, index=grouped_direct_damages.index)
# rejoin expected annual damage cost estimates with geometry and metadata columns and write to disk
@@ -147,7 +147,7 @@
unsplit_subset.join(expected_annual_damages_only, validate="one_to_one", how="right")
)
assert len(expected_annual_damages) <= len(unsplit_subset)
assert "edge_id" in expected_annual_damages.columns
assert "id" in expected_annual_damages.columns

# combined the per return period and the integrated outputs into a single dataframe
return_period_and_ead_damages = direct_damages.join(expected_annual_damages_only, validate="one_to_one")
Expand All @@ -161,7 +161,7 @@
assert len(direct_damages) == len(expected_annual_damages)

for dataframe in (damage_fraction, direct_damages, expected_annual_damages, return_period_and_ead_damages):
assert "edge_id" in dataframe
assert "id" in dataframe

logging.info(f"Writing out {damage_fraction.shape=} (per split geometry, hazard RP map)")
damage_fraction.to_parquet(damage_fraction_path)
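Finally, a small sketch of combining the per-return-period costs with the integrated expected annual damages as done in this script (the column names and EAD values below are placeholders, not the result of the repository's integration over return periods):

import pandas as pd

# Per-edge damages for each return period map (column names are illustrative)
direct_damages = pd.DataFrame(
    {"RP10": [100.0, 50.0], "RP100": [1500.0, 800.0]},
    index=pd.Index(["road_1", "road_2"], name="id"),
)
direct_damages["id"] = direct_damages.index

# Expected annual damages on the same index (placeholder values)
expected_annual_damages_only = pd.DataFrame({"ead": [72.0, 38.0]}, index=direct_damages.index)

# Combine the per return period and the integrated outputs into a single dataframe
return_period_and_ead_damages = direct_damages.join(expected_annual_damages_only, validate="one_to_one")

for dataframe in (direct_damages, return_period_and_ead_damages):
    assert "id" in dataframe  # membership on a DataFrame checks column labels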
