Merged
55 commits
772d7dd
add add void deck to format helper, add checker that floors_ag - void…
ShiZhongming Sep 9, 2025
471bf9f
add add void deck to format helper, add checker that floors_ag - void…
ShiZhongming Sep 14, 2025
cecbac8
Update archetypes_mapper.py
ShiZhongming Sep 14, 2025
89f30a8
unify the source of truth to architecture
ShiZhongming Sep 14, 2025
e4368a0
Update result_summary.py
ShiZhongming Sep 14, 2025
d94b8cf
Merge branch 'master' into some-fix-from-idp-shanghai---void-deck-and…
ShiZhongming Sep 19, 2025
d59270a
Refactor void_deck validation in migrator
reyery Sep 19, 2025
ca9dc84
Remove migrate_void_deck_data call from main
reyery Sep 19, 2025
261e502
Vectorize architecture property calculations
reyery Sep 19, 2025
3c6908b
Add type hint for dataframe
reyery Sep 19, 2025
f49e9d8
Update docstring for generate_architecture_csv
reyery Sep 19, 2025
2378c8d
Generate architecture CSV before demand calculation
reyery Sep 19, 2025
a3c5083
Generate architecture CSV if missing in plot_main
reyery Sep 19, 2025
9555831
Remove unused import
reyery Sep 19, 2025
5b160d7
Fix warning message formatting in void_deck_migrator
reyery Sep 19, 2025
a3a0cc6
Refactor architecture and void_deck utilities
reyery Sep 19, 2025
869dbc8
Refactor imports to use cea.datamanagement.utils
reyery Sep 19, 2025
0f7f254
Generate architecture CSV in thermal loads test setup
reyery Sep 19, 2025
6dfdf54
Add void deck data migration to thermal loads test
reyery Sep 19, 2025
6b4c12a
Fix merge conflicts in architecture CSV generation
reyery Sep 19, 2025
fbe780d
Get geometry data from zone dataframe
reyery Sep 19, 2025
261ab2c
Save updated zone geometry after void_deck migration
reyery Sep 19, 2025
15d3817
Call migrate_void_deck_data in generate_architecture_csv
reyery Sep 19, 2025
a48c124
Improve architecture CSV generation error handling
reyery Sep 19, 2025
c5650eb
Add docstring to calc_useful_areas function
reyery Sep 19, 2025
46e4390
Refactor useful area calculations in demand module
reyery Sep 19, 2025
7b26b9b
Use DataFrame index for merge
reyery Sep 19, 2025
2eb9c7b
Refactor geometry_reader_radiation_daysim method
reyery Sep 20, 2025
22d9c8e
Update geometry type to GeoDataFrame in BuildingRCModel
reyery Sep 20, 2025
7a25a5d
Refactor geometry_reader_radiation_daysim to update envelope DataFrame
reyery Sep 20, 2025
00812aa
Restrict zone geometry columns in BuildingGeometry
reyery Sep 20, 2025
f82c3d1
Refactor RC model calculations to use areas_df and envelope
reyery Sep 20, 2025
0447871
Fix solar calculation to use envelope U-values
reyery Sep 20, 2025
49313cf
Remove duplicated U-value fields from RCModelProperties
reyery Sep 20, 2025
bf104c5
Move area fields from RCModelProperties to EnvelopeProperties
reyery Sep 20, 2025
03cc1c1
Fix void_deck reference in RC model calculation
reyery Sep 20, 2025
38901a9
Remove footprint attribute from RCModelProperties
reyery Sep 20, 2025
0db063b
Ensure RCModelProperties has relevant properties
reyery Sep 20, 2025
60e2758
Use RC model GFA for small building detection
reyery Sep 20, 2025
8c64beb
Refactor to use envelope attributes after changes
reyery Sep 20, 2025
93e8b12
Use bpr attributes instead of recalculating
reyery Sep 20, 2025
47390bb
Remove 'name' column from hourly demand dataframes
reyery Sep 20, 2025
0f0bee5
Remove architecture CSV generation from test setup
reyery Sep 20, 2025
bdba1c5
Remove architecture CSV generation from demand_main
reyery Sep 20, 2025
38621da
Remove unused imports
reyery Sep 20, 2025
d8f9ce3
Handle void_deck column conflict in DataFrame merge
reyery Sep 20, 2025
565f8f8
Remove unused imports and architecture CSV generation
reyery Sep 21, 2025
f9890f6
Add architecture metrics handling to result summary
reyery Sep 21, 2025
4944435
Remove architecture output methods from InputLocator
reyery Sep 21, 2025
f6d3239
Remove generate_architecture_csv function
reyery Sep 21, 2025
45c1853
Remove 'name' from BUILDINGS_DEMANDS_COLUMNS
reyery Sep 21, 2025
96e8071
Refactor substation HEX data structures to use Series
reyery Sep 21, 2025
c59da2a
Refactor substation_HEX_sizing return values
reyery Sep 21, 2025
46b7eb7
Use .items instead of .keys
reyery Sep 21, 2025
ab9746a
Refactor demand DataFrame usage in substation_matrix
reyery Sep 21, 2025
2 changes: 1 addition & 1 deletion cea/analysis/lca/embodied.py
@@ -15,7 +15,7 @@
import cea.inputlocator
from cea.constants import SERVICE_LIFE_OF_BUILDINGS, SERVICE_LIFE_OF_TECHNICAL_SYSTEMS, \
CONVERSION_AREA_TO_FLOOR_AREA_RATIO, EMISSIONS_EMBODIED_TECHNICAL_SYSTEMS
-from cea.datamanagement.void_deck_migrator import migrate_void_deck_data
+from cea.datamanagement.utils import migrate_void_deck_data

__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
2 changes: 1 addition & 1 deletion cea/datamanagement/archetypes_mapper.py
@@ -100,6 +100,7 @@ def archetypes_mapper(locator,
supply_mapper(locator, building_typology_df)



def indoor_comfort_mapper(list_uses, locator, occupant_densities, building_typology_df):
comfort_DB = pd.read_csv(locator.get_database_archetypes_use_type())
# define comfort
@@ -400,7 +401,6 @@ def verify_building_standards(building_typology_df, db_standards):
diff = typology_standards.difference(db_standards)
raise ValueError(f'The following standards are not found in the database: {", ".join(diff)}')


def main(config):
"""
Run the properties script with input from the reference case and compare the results. This ensures that changes
19 changes: 13 additions & 6 deletions cea/datamanagement/format_helper/cea4_migrate.py
@@ -24,6 +24,7 @@
from cea.datamanagement.format_helper.cea4_verify import cea4_verify, verify_shp, \
COLUMNS_ZONE_4, print_verification_results_4, path_to_input_file_without_db_4, CSV_BUILDING_PROPERTIES_3_CSV
from cea.datamanagement.format_helper.cea4_verify_db import check_directory_contains_csv
from cea.datamanagement.utils import migrate_void_deck_data
from cea.utilities.dbf import dbf_to_dataframe

COLUMNS_ZONE_3 = ['Name', 'floors_bg', 'floors_ag', 'height_bg', 'height_ag']
@@ -550,9 +551,12 @@ def migrate_cea3_to_cea4(scenario, verbose=False):
pass
# print('For Scenario: {scenario}, '.format(scenario=scenario_name), 'zone.shp already follows the CEA-4 format.')
else:
-                raise ValueError('! zone.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration.'
-                                 'Check the following column(s) for CEA-3 format: {list_missing_attributes_zone_3}.'.format(list_missing_attributes_zone_3=list_missing_attributes_zone_3),
-                                 'Check the following column(s) for CEA-4 format: {list_missing_attributes_zone_4}.'.format(list_missing_attributes_zone_4=list_missing_attributes_zone_4)
+                if list_missing_attributes_zone_4[0] == 'void_deck' and len(list_missing_attributes_zone_4) == 1:
+                    config = cea.config.Configuration()
+                    locator = cea.inputlocator.InputLocator(config.scenario)
+                    migrate_void_deck_data(locator)
+                else:
+                    raise ValueError('! zone.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_zone_3}.'.format(list_missing_attributes_zone_3=list_missing_attributes_zone_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_zone_4}.'.format(list_missing_attributes_zone_4=list_missing_attributes_zone_4)
Comment on lines +554 to +559
⚠️ Potential issue

Guard the ‘void_deck-only’ check and build the locator from the passed scenario (also avoids indexing errors).

  • Indexing list_missing_attributes_zone_4[0] will crash when the list is empty.
  • Use an equality check against ['void_deck'] and instantiate InputLocator with the function’s scenario argument (not the global config). Also, cea.inputlocator isn’t imported in this file.

Apply this diff:

-            else:
-                if list_missing_attributes_zone_4[0] == 'void_deck' and len(list_missing_attributes_zone_4) == 1:
-                    config = cea.config.Configuration()
-                    locator = cea.inputlocator.InputLocator(config.scenario)
-                    migrate_void_deck_data(locator)
-                else:
+            else:
+                if list_missing_attributes_zone_4 == ['void_deck']:
+                    locator = cea.inputlocator.InputLocator(scenario)
+                    migrate_void_deck_data(locator)
+                else:
                     raise ValueError('! zone.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_zone_3}.'.format(list_missing_attributes_zone_3=list_missing_attributes_zone_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_zone_4}.'.format(list_missing_attributes_zone_4=list_missing_attributes_zone_4)
                                  )

Add the missing import near the top of the file:

import cea.inputlocator
🤖 Prompt for AI Agents
In cea/datamanagement/format_helper/cea4_migrate.py around lines 554 to 559,
replace the unsafe indexing check and wrong locator creation: guard against
empty list by checking equality to ['void_deck'] (i.e.
list_missing_attributes_zone_4 == ['void_deck']) instead of
list_missing_attributes_zone_4[0], instantiate the InputLocator using the
function's scenario argument (locator = cea.inputlocator.InputLocator(scenario))
rather than creating a new global config, and add the missing import "import
cea.inputlocator" near the top of the file so the module is available.

)
else:
print("! Ensure zone.shp (CEA-3 format) is present in building-geometry folder.")
@@ -571,9 +575,12 @@ def migrate_cea3_to_cea4(scenario, verbose=False):
pass
# print('For Scenario: {scenario}, '.format(scenario=scenario_name), 'surroundings.shp already follows the CEA-4 format.')
else:
-                raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration.'
-                                 'Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3),
-                                 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4)
+                if list_missing_attributes_zone_4[0] == 'void_deck' and len(list_missing_attributes_zone_4) == 1:
+                    config = cea.config.Configuration()
+                    locator = cea.inputlocator.InputLocator(config.scenario)
+                    migrate_void_deck_data(locator)
+                else:
+                    raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4)
)
Comment on lines +578 to 584
⚠️ Potential issue

Wrong list referenced in surroundings branch; remove void_deck migration here.

void_deck applies to zone.shp, not surroundings.shp. This branch incorrectly inspects list_missing_attributes_zone_4 and calls the void_deck migrator. It should just raise the existing error for surroundings.

Apply this diff:

-            else:
-                if list_missing_attributes_zone_4[0] == 'void_deck' and len(list_missing_attributes_zone_4) == 1:
-                    config = cea.config.Configuration()
-                    locator = cea.inputlocator.InputLocator(config.scenario)
-                    migrate_void_deck_data(locator)
-                else:
-                    raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4)
-                                 )
+            else:
+                raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4))
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if list_missing_attributes_zone_4[0] == 'void_deck' and len(list_missing_attributes_zone_4) == 1:
config = cea.config.Configuration()
locator = cea.inputlocator.InputLocator(config.scenario)
migrate_void_deck_data(locator)
else:
raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4)
)
else:
raise ValueError('surroundings.shp exists but follows neither the CEA-3 nor CEA-4 format. CEA cannot proceed with the data migration. Check the following column(s) for CEA-3 format: {list_missing_attributes_surroundings_3}.'.format(list_missing_attributes_surroundings_3=list_missing_attributes_surroundings_3), 'Check the following column(s) for CEA-4 format: {list_missing_attributes_surroundings_4}.'.format(list_missing_attributes_surroundings_4=list_missing_attributes_surroundings_4))
🤖 Prompt for AI Agents
In cea/datamanagement/format_helper/cea4_migrate.py around lines 578 to 584, the
code incorrectly checks list_missing_attributes_zone_4 and calls
migrate_void_deck_data in the surroundings.shp branch; remove that conditional
and the migrate_void_deck_data call, and instead always raise the existing
ValueError for surroundings (combine the two formatted messages as currently
written) so surroundings.shp does not trigger void_deck migration.

else:
print('! (optional) Run Surroundings Helper to generate surroundings.shp after the data migration.')
121 changes: 121 additions & 0 deletions cea/datamanagement/utils/__init__.py
@@ -0,0 +1,121 @@
from __future__ import annotations
from typing import TYPE_CHECKING
import warnings

import pandas as pd
import geopandas as gpd

if TYPE_CHECKING:
from cea.inputlocator import InputLocator


def migrate_void_deck_data(locator: InputLocator) -> None:
"""Check if void_deck exists in zone.shp and copy it from envelope.csv if necessary.
:param locator: the input locator object.
:type locator: cea.inputlocator.InputLocator
"""

zone_gdf = gpd.read_file(locator.get_zone_geometry())
⚠️ Potential issue

Missing zone geometry update after void deck migration.

The migrate_void_deck_data function modifies the zone geometry by adding the void_deck column, but it doesn't save the updated GeoDataFrame back to the shapefile. This means subsequent calls to read the zone geometry won't see the migrated void_deck data.

Apply this diff to save the updated zone geometry:

 zone_gdf["void_deck"] = zone_gdf["void_deck"].fillna(0)
 print("Migrated void_deck data from envelope.csv to zone.shp.")
+zone_gdf.to_file(locator.get_zone_geometry())
 envelope_df.drop(columns=["void_deck"], inplace=True)
 envelope_df.to_csv(locator.get_building_architecture(), index=False)

And for the case where void_deck is initialized to 0:

 zone_gdf["void_deck"] = 0
 warnings.warn(
     "No void_deck data found in envelope.csv, setting to 0 in zone.shp"
 )
+zone_gdf.to_file(locator.get_zone_geometry())

Also applies to: 27-27

🤖 Prompt for AI Agents
In cea/datamanagement/utils/__init__.py around lines 19 and 27, after reading
zone_gdf and after migrate_void_deck_data modifies or initializes the void_deck
column you must write the updated GeoDataFrame back to the zone geometry
shapefile; call zone_gdf.to_file(locator.get_zone_geometry()) (preserve
CRS/index as needed) so subsequent reads see the migrated data, and do the same
when void_deck is initialized to 0.

isin_zone = "void_deck" in zone_gdf.columns

if not isin_zone:
envelope_df = pd.read_csv(locator.get_building_architecture())

if "void_deck" in envelope_df.columns:
# assign void_deck from envelope.csv to zone.shp and remove it from envelope.csv
zone_gdf = zone_gdf.merge(
envelope_df[["name", "void_deck"]], on="name", how="left"
)
zone_gdf["void_deck"] = zone_gdf["void_deck"].fillna(0)
print("Migrated void_deck data from envelope.csv to zone.shp.")
envelope_df.drop(columns=["void_deck"], inplace=True)
envelope_df.to_csv(locator.get_building_architecture(), index=False)

else: # cannot find void_deck anywhere, just initialize it to 0
zone_gdf["void_deck"] = 0
warnings.warn(
"No void_deck data found in envelope.csv, setting to 0 in zone.shp"
)

# Validate that floors_ag is larger than void_deck for each building
actual_floors = zone_gdf["floors_ag"] - zone_gdf["void_deck"]
invalid_floors = zone_gdf[actual_floors <= 0]
if len(invalid_floors) > 0:
invalid_buildings = invalid_floors["name"].tolist()
warnings.warn(f"Some buildings have void_deck greater than floors_ag: {invalid_buildings}",
RuntimeWarning)


def generate_architecture_csv(locator: InputLocator, building_typology_df: gpd.GeoDataFrame):
"""
Generate an architecture CSV file with geometric properties
Includes:
- Af_m2: Conditioned floor area [m2]
- Aroof_m2: Roof area [m2]
- GFA_m2: Gross floor area [m2]
- Aocc_m2: Occupied floor area [m2]
:param locator: InputLocator instance
:param building_typology_df: GeoDataFrame containing building geometry data
"""
# Get architecture database to access Hs, Ns, Es, occupied_bg values
architecture_DB = pd.read_csv(locator.get_database_archetypes_construction_type())
prop_architecture_df = building_typology_df.merge(architecture_DB, left_on='const_type', right_on='const_type',
# avoid column name conflicts and keep left ones
# possible conflicts: 'void_deck'
suffixes=('', '_y'))

# Calculate architectural properties
# Calculate areas based on geometry
footprint = prop_architecture_df.geometry.area # building footprint area
floors_ag = prop_architecture_df['floors_ag'] # above-ground floors
floors_bg = prop_architecture_df['floors_bg'] # below-ground floors
void_deck = prop_architecture_df['void_deck'] # void deck floors
⚠️ Potential issue

Missing void_deck column in building_typology_df causes pipeline failure.

The pipeline error indicates that void_deck is not present in building_typology_df. The function attempts to access prop_architecture_df['void_deck'] without checking if it exists, causing a KeyError when the column is missing.

Apply this diff to handle the missing column:

 floors_ag = prop_architecture_df['floors_ag']  # above-ground floors
 floors_bg = prop_architecture_df['floors_bg']  # below-ground floors
-void_deck = prop_architecture_df['void_deck']  # void deck floors
+void_deck = prop_architecture_df['void_deck'] if 'void_deck' in prop_architecture_df.columns else 0  # void deck floors

Alternatively, ensure migrate_void_deck_data is called before generate_architecture_csv to guarantee the column exists:

# Add at the beginning of generate_architecture_csv function
migrate_void_deck_data(locator)
building_typology_df = gpd.read_file(locator.get_zone_geometry())  # Re-read after migration
🧰 Tools
🪛 GitHub Actions: CI

[error] 72-72: KeyError: 'void_deck' while generating architecture CSV in generate_architecture_csv (column 'void_deck' missing).


# Get shares from architecture database
Hs = prop_architecture_df['Hs'] # Share of GFA that is conditioned
Ns = prop_architecture_df['Ns'] # Share of GFA that is occupied
occupied_bg = prop_architecture_df['occupied_bg'] # Whether basement is occupied

# Calculate GFA components using proper equations
gfa_ag_m2 = footprint * (floors_ag - void_deck) # Above-ground GFA
gfa_bg_m2 = footprint * floors_bg # Below-ground GFA
gfa_m2 = gfa_ag_m2 + gfa_bg_m2 # Total GFA

# Split shares between above and below ground areas
# Using the same logic as in useful_areas.py split_above_and_below_ground_shares
effective_floors_ag = floors_ag - void_deck
denominator = effective_floors_ag + floors_bg * occupied_bg
share_ag = effective_floors_ag / denominator
# Handle division by zero case
share_ag = share_ag.fillna(1.0).where(denominator > 0, 1.0)
share_bg = 1 - share_ag

Hs_ag = Hs * share_ag
Hs_bg = Hs * share_bg
Ns_ag = Ns * share_ag
Ns_bg = Ns * share_bg

# Calculate areas using proper equations from useful_areas.py
af_m2 = gfa_ag_m2 * Hs_ag + gfa_bg_m2 * Hs_bg # Conditioned floor area
aocc_m2 = gfa_ag_m2 * Ns_ag + gfa_bg_m2 * Ns_bg # Occupied floor area
aroof_m2 = footprint # Roof area equals footprint

# Create DataFrame directly from vectorized calculations
architecture_df = pd.DataFrame({
'name': prop_architecture_df['name'],
'Af_m2': af_m2,
'Aroof_m2': aroof_m2,
'GFA_m2': gfa_m2,
'Aocc_m2': aocc_m2,
})

# Ensure parent folder exists
locator.ensure_parent_folder_exists(locator.get_architecture_csv())

# Save to CSV file
architecture_df.to_csv(locator.get_architecture_csv(), index=False, float_format='%.3f')
print(f"Architecture data generated and saved to: {locator.get_architecture_csv()}")

35 changes: 0 additions & 35 deletions cea/datamanagement/void_deck_migrator.py

This file was deleted.

8 changes: 7 additions & 1 deletion cea/demand/demand_main.py
@@ -6,6 +6,8 @@
import time
from itertools import repeat

import geopandas as gpd

import cea.config
import cea.inputlocator
import cea.utilities.parallel
@@ -15,7 +17,7 @@
from cea.utilities import epwreader
from cea.utilities.date import get_date_range_hours_from_year
from cea.demand import demand_writers
-from cea.datamanagement.void_deck_migrator import migrate_void_deck_data
+from cea.datamanagement.utils import generate_architecture_csv, migrate_void_deck_data


__author__ = "Jimeno A. Fonseca"
@@ -110,6 +112,10 @@ def main(config):

if not radiation_files_exist(locator, config):
raise MissingInputDataException("Missing radiation data in scenario. Consider running radiation script first.")

# Ensure that the architecture csv is generated
zone_df = gpd.read_file(locator.get_zone_geometry())
generate_architecture_csv(locator, zone_df)

demand_calculation(locator=locator, config=config)

18 changes: 13 additions & 5 deletions cea/demand/demand_writers.py
@@ -87,7 +87,7 @@ def results_to_hdf5(self, tsd: TimeSeriesData, bpr: BuildingPropertiesRow, locat
self.write_to_hdf5(building_name, columns, hourly_data, locator)

# save total for the year
-        columns, data = self.calc_yearly_dataframe(bpr, building_name, tsd)
+        columns, data = self.calc_yearly_dataframe(bpr, building_name, tsd, locator)
# save to disc
partial_total_data = pd.DataFrame(data, index=[0])
partial_total_data.drop('name', inplace=True, axis=1)
@@ -101,12 +101,12 @@ def results_to_csv(self, tsd: TimeSeriesData, bpr: BuildingPropertiesRow, locato
self.write_to_csv(building_name, columns, hourly_data, locator)

# save annual values to a temp file for YearlyDemandWriter
-        columns, data = self.calc_yearly_dataframe(bpr, building_name, tsd)
+        columns, data = self.calc_yearly_dataframe(bpr, building_name, tsd, locator)
pd.DataFrame(data, index=[0]).to_csv(
locator.get_temporary_file('%(building_name)sT.csv' % locals()),
index=False, columns=columns, float_format='%.3f', na_rep='nan')

-    def calc_yearly_dataframe(self, bpr: BuildingPropertiesRow, building_name, tsd: TimeSeriesData):
+    def calc_yearly_dataframe(self, bpr: BuildingPropertiesRow, building_name, tsd: TimeSeriesData, locator):
# if printing total values is necessary
# treating timeseries data from W to MWh
data = dict((x + '_MWhyr', np.nan_to_num(tsd.get_load_value(x)).sum() / 1000000) for x in self.load_vars)
@@ -115,9 +115,17 @@ def calc_yearly_dataframe(self, bpr: BuildingPropertiesRow, building_name, tsd:
keys = data.keys()
columns = self.OTHER_VARS
columns.extend(keys)

⚠️ Potential issue

Mutating shared class list (self.OTHER_VARS) causes column duplication across buildings

columns = self.OTHER_VARS extends the shared list in-place, so subsequent buildings will accumulate duplicate keys.

Apply this diff:

-        columns = self.OTHER_VARS
+        columns = self.OTHER_VARS.copy()
         columns.extend(keys)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
keys = data.keys()
columns = self.OTHER_VARS
columns.extend(keys)
keys = data.keys()
columns = self.OTHER_VARS.copy()
columns.extend(keys)
🤖 Prompt for AI Agents
In cea/demand/demand_writers.py around lines 116 to 119, the code assigns
columns = self.OTHER_VARS and then extends it, mutating the shared class list
and causing duplicated columns across buildings; instead, create a new list
instance before extending (for example copy self.OTHER_VARS with
list(self.OTHER_VARS) or self.OTHER_VARS[:] and then extend that new list with
the data keys converted to a list), so the class-level OTHER_VARS is never
modified and each building gets its own columns list.
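A minimal standalone illustration of the pitfall (generic names, not from the CEA codebase):

class Writer:
    OTHER_VARS = ['name', 'GFA_m2']  # shared, class-level list

    def columns_for(self, keys):
        columns = self.OTHER_VARS   # same list object on every call
        columns.extend(keys)        # mutates the class attribute in place
        return columns


w = Writer()
print(w.columns_for(['Qhs_MWhyr']))  # ['name', 'GFA_m2', 'Qhs_MWhyr']
print(w.columns_for(['Qcs_MWhyr']))  # ['name', 'GFA_m2', 'Qhs_MWhyr', 'Qcs_MWhyr'] -- keys accumulate
# Copying first (columns = self.OTHER_VARS.copy()) keeps OTHER_VARS intact between buildings.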

# get the architecture data
architecture_df = pd.read_csv(locator.get_architecture_csv())
Af_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Af_m2'].iloc[0])
Aroof_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aroof_m2'].iloc[0])
GFA_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'GFA_m2'].iloc[0])
Aocc_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aocc_m2'].iloc[0])

🛠️ Refactor suggestion

⚠️ Potential issue

Architecture CSV read lacks guards; crash if file or building row missing

Add explicit checks for file existence and building presence to fail fast with a clear message. Also avoid re-reading the CSV per building if possible.

Apply this diff (minimal guard; optional caching noted in comment):

+        import os
+        arch_path = locator.get_architecture_csv()
+        if not os.path.exists(arch_path):
+            raise FileNotFoundError(f"Missing architecture CSV: {arch_path}. Generate it before running demand.")
-        architecture_df = pd.read_csv(locator.get_architecture_csv())
-        Af_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Af_m2'].iloc[0])
-        Aroof_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aroof_m2'].iloc[0])
-        GFA_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'GFA_m2'].iloc[0])
-        Aocc_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aocc_m2'].iloc[0])
+        architecture_df = pd.read_csv(arch_path)
+        row = architecture_df.loc[architecture_df['name'] == building_name]
+        if row.empty:
+            raise KeyError(f"Building '{building_name}' not found in {arch_path}.")
+        Af_m2 = float(row['Af_m2'].iloc[0])
+        Aroof_m2 = float(row['Aroof_m2'].iloc[0])
+        GFA_m2 = float(row['GFA_m2'].iloc[0])
+        Aocc_m2 = float(row['Aocc_m2'].iloc[0])

Optional: cache architecture_df once per run to avoid N file reads (e.g., memoize by path).

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# get the architecture data
architecture_df = pd.read_csv(locator.get_architecture_csv())
Af_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Af_m2'].iloc[0])
Aroof_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aroof_m2'].iloc[0])
GFA_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'GFA_m2'].iloc[0])
Aocc_m2 = float(architecture_df.loc[architecture_df['name'] == building_name, 'Aocc_m2'].iloc[0])
# get the architecture data
import os
arch_path = locator.get_architecture_csv()
if not os.path.exists(arch_path):
raise FileNotFoundError(f"Missing architecture CSV: {arch_path}. Generate it before running demand.")
architecture_df = pd.read_csv(arch_path)
row = architecture_df.loc[architecture_df['name'] == building_name]
if row.empty:
raise KeyError(f"Building '{building_name}' not found in {arch_path}.")
Af_m2 = float(row['Af_m2'].iloc[0])
Aroof_m2 = float(row['Aroof_m2'].iloc[0])
GFA_m2 = float(row['GFA_m2'].iloc[0])
Aocc_m2 = float(row['Aocc_m2'].iloc[0])
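One way to realise the optional caching mentioned above is to memoize the read by path; a sketch using the standard-library functools.lru_cache (the helper name is illustrative, and the cached frame should be treated as read-only since every call returns the same object):

import functools

import pandas as pd


@functools.lru_cache(maxsize=None)
def _read_architecture_csv(path: str) -> pd.DataFrame:
    """Read architecture.csv once per path; later calls reuse the cached frame."""
    return pd.read_csv(path)


# inside calc_yearly_dataframe, instead of re-reading the file per building:
# architecture_df = _read_architecture_csv(locator.get_architecture_csv())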

# add other default elements
-        data.update({'name': building_name, 'Af_m2': bpr.rc_model.Af, 'Aroof_m2': bpr.rc_model.Aroof,
-                     'GFA_m2': bpr.rc_model.GFA_m2, 'Aocc_m2': bpr.rc_model.Aocc,
+        data.update({'name': building_name, 'Af_m2': Af_m2, 'Aroof_m2': Aroof_m2,
+                     'GFA_m2': GFA_m2, 'Aocc_m2': Aocc_m2,
'people0': tsd.occupancy.people.max()})
return columns, data

2 changes: 1 addition & 1 deletion cea/demand/occupancy_helper.py
@@ -13,7 +13,7 @@
import cea.utilities.parallel
from cea.constants import HOURS_IN_YEAR, MONTHS_IN_YEAR
from cea.datamanagement.schedule_helper import read_cea_schedule
-from cea.datamanagement.void_deck_migrator import migrate_void_deck_data
+from cea.datamanagement.utils import migrate_void_deck_data
from cea.demand.building_properties import calc_useful_areas
from cea.demand.constants import VARIABLE_CEA_SCHEDULE_RELATION
from cea.utilities import epwreader
9 changes: 7 additions & 2 deletions cea/import_export/result_summary.py
@@ -178,7 +178,7 @@ def get_results_path(locator, cea_feature, list_buildings):
list_appendix = []

if cea_feature == 'architecture':
-        path = locator.get_total_demand()
+        path = locator.get_architecture_csv()
list_paths.append(path)
list_appendix.append(cea_feature)

@@ -2142,7 +2142,12 @@ def normalize_dataframe(df, area_column):
df_time_path = locator.get_export_results_summary_cea_feature_time_period_file(
summary_folder, cea_feature, appendix, time_period, hour_start, hour_end
)
-        df_time_resolution = pd.read_csv(df_time_path)
+
+        if not os.path.exists(df_time_path):
+            print(f"File not found: {df_time_path}.")
+            break
+        else:
+            df_time_resolution = pd.read_csv(df_time_path)

if bool_use_acronym:
df_time_resolution.columns = map_metrics_and_cea_columns(
9 changes: 9 additions & 0 deletions cea/inputlocator.py
@@ -1329,6 +1329,15 @@ def get_street_network(self):

# OUTPUTS

# ARCHITECTURE
def get_architecture_folder(self):
"""scenario/outputs/data/architecture"""
return os.path.join(self.scenario, 'outputs', 'data', 'architecture')

def get_architecture_csv(self):
"""scenario/outputs/data/architecture/architecture.csv"""
return os.path.join(self.get_architecture_folder(), 'architecture.csv')

# SOLAR-RADIATION
def get_solar_radiation_folder(self):
"""scenario/outputs/data/solar-radiation"""
2 changes: 1 addition & 1 deletion cea/resources/radiation/main.py
@@ -14,7 +14,7 @@
import cea.config
import cea.inputlocator
from cea.datamanagement.databases_verification import verify_input_geometry_zone, verify_input_geometry_surroundings
-from cea.datamanagement.void_deck_migrator import migrate_void_deck_data
+from cea.datamanagement.utils import migrate_void_deck_data
from cea.resources.radiation import daysim, geometry_generator
from cea.resources.radiation.daysim import GridSize
from cea.resources.radiation.radiance import CEADaySim
2 changes: 1 addition & 1 deletion cea/resources/radiationCRAX/main.py
@@ -21,7 +21,7 @@
import cea.config
import cea.inputlocator
from cea.datamanagement.databases_verification import verify_input_geometry_zone, verify_input_geometry_surroundings
-from cea.datamanagement.void_deck_migrator import migrate_void_deck_data
+from cea.datamanagement.utils import migrate_void_deck_data
from cea.resources.radiation import geometry_generator
from cea.resources.radiation.daysim import GridSize, calc_sensors_building
from cea.resources.radiation.geometry_generator import BuildingGeometry
7 changes: 7 additions & 0 deletions cea/tests/test_calc_thermal_loads.py
@@ -8,8 +8,10 @@
import unittest

import pandas as pd
import geopandas as gpd

from cea.config import DEFAULT_CONFIG, Configuration
from cea.datamanagement.utils import generate_architecture_csv, migrate_void_deck_data
from cea.demand.building_properties import BuildingProperties
from cea.demand.occupancy_helper import occupancy_helper_main
from cea.demand.thermal_loads import calc_thermal_loads
@@ -39,6 +41,11 @@ def setUpClass(cls):
# Extract reference case
cls.locator = ReferenceCaseOpenLocator()

# FIXME: Update reference case to have void deck and architecture file
# Ensure that the architecture csv is generated
migrate_void_deck_data(cls.locator)
generate_architecture_csv(cls.locator, gpd.read_file(cls.locator.get_zone_geometry()))

cls.config = Configuration(DEFAULT_CONFIG)
cls.config.scenario = cls.locator.scenario
cls.weather_data = epwreader.epw_reader(cls.locator.get_weather('Zug_inducity_2009'))[
2 changes: 1 addition & 1 deletion cea/visualisation/a_data_loader.py
@@ -6,7 +6,7 @@

import cea.inputlocator
import os
import cea.config

from cea.import_export.result_summary import process_building_summary
import pandas as pd

Expand Down