diff --git a/docs/source/usage/workflows/memoryPerDevice.py b/docs/source/usage/workflows/memoryPerDevice.py index aba71e07fa..28b6f79821 100755 --- a/docs/source/usage/workflows/memoryPerDevice.py +++ b/docs/source/usage/workflows/memoryPerDevice.py @@ -48,20 +48,20 @@ super_cell_size = np.array((16, 16, 1)) # how many cells each gpu in this row of this dimension handles -# for example global_domain_division = [np.array((64,192)), np.array((960, 320))] - -# for simplicity's sake we will distribute cells evenly, but this is not required +# for example grid_distribution = [np.array((64,192)), np.array((960, 320))] +# here we distribute cells evenly, but this is not required if np.any(global_cell_extent % global_gpu_extent != 0): raise ValueError("global cell extent must be divisble by the global gpu extent") -global_domain_division = [] +grid_distribution = [] for dim in range(simulation_dimension): row_division = np.full(global_gpu_extent[dim], int(global_cell_extent[dim] / global_gpu_extent[dim]), dtype=np.int_) - global_domain_division.append(row_division) + grid_distribution.append(row_division) -print(f"global domain division: {global_domain_division}") +print(f"grid distribution: {grid_distribution}") -# get cell extent of each GPU, [simulation dimension, multi dimensional gpu index] -gpu_cell_extent = np.meshgrid(*global_domain_division) +# get cell extent of each GPU: list of np.array[np.int_], one per simulation dimension, with each array entry being the +# cell extent of the corresponding gpu in the simulation, indexation by [simulation_dimension, gpu_index[0], gpu_index[1], ...] 
+gpu_cell_extent = np.meshgrid(*grid_distribution) # extent of cells filled with particles for each gpu # init @@ -72,7 +72,7 @@ # calculate offset of gpu in cells gpu_cell_offset = np.empty(simulation_dimension) for dim in range(simulation_dimension): - gpu_cell_offset[dim] = np.sum(global_domain_division[dim][: gpu_index[dim]]) + gpu_cell_offset[dim] = np.sum(grid_distribution[dim][: gpu_index[dim]]) # figure out the extent of the particle filled cells of the gpu # for our example figure out how many cells in y-direction belong to the foil + pre-plasma @@ -106,7 +106,7 @@ ) else: - # fully in side target + # fully inside target for dim in range(simulation_dimension): gpu_particle_cell_extent[dim][gpu_index] = gpu_cell_extent[dim][gpu_index] diff --git a/lib/python/picongpu/extra/utils/memory_calculator.py b/lib/python/picongpu/extra/utils/memory_calculator.py index 061929f31e..b2132d2f4a 100755 --- a/lib/python/picongpu/extra/utils/memory_calculator.py +++ b/lib/python/picongpu/extra/utils/memory_calculator.py @@ -56,9 +56,9 @@ class Config: def __init__(self, **keyword_arguments): pydantic.BaseModel.__init__(self, **keyword_arguments) - self.checkDimensionsOfArrays() - self.shrink_to_simulation_dimension() - self.check() + self._check_dimensions_of_arrays() + self._shrink_to_simulation_dimension() + self._check() @staticmethod def get_value_size(precision: int) -> int: @@ -106,7 +106,7 @@ def get_predefined_attribute_dict(simulation_dimension: int, precision: int) -> } @typeguard.typechecked - def check_cell_extent(self, cell_extent: nptype.NDArray): + def _check_cell_extent(self, cell_extent: nptype.NDArray): """check cell extent is consistent with configuration""" if (cell_extent).ndim != 1: raise ValueError("cell_extent must be 1D array") @@ -122,7 +122,7 @@ def check_cell_extent(self, cell_extent: nptype.NDArray): " please set super_cell_size to a correct value" ) - def checkDimensionsOfArrays(self): + def _check_dimensions_of_arrays(self): """check all set 
array have expected dimension""" if (self.pml_border_size).ndim != 2: raise ValueError("pml_border_size must be 2D array") @@ -131,7 +131,7 @@ def checkDimensionsOfArrays(self): if (self.guard_size).ndim != 1: raise ValueError("guard_size must be 1D array") - def shrink_to_simulation_dimension(self): + def _shrink_to_simulation_dimension(self): if self.simulation_dimension == 2: if (self.pml_border_size).shape[0] == 3: self.pml_border_size = self.pml_border_size[:2] @@ -143,7 +143,7 @@ def shrink_to_simulation_dimension(self): self.guard_size = self.guard_size[:2] @typeguard.typechecked - def check(self): + def _check(self): """check configuration is sensible""" if self.simulation_dimension > 3 or self.simulation_dimension < 2: raise ValueError("PIConGPU only supports 2D or 3D simulations.") @@ -170,12 +170,12 @@ def memory_required_by_cell_fields( @return unit: bytes """ - self.check_cell_extent(cell_extent) + self._check_cell_extent(cell_extent) # PML size cannot exceed the local grid size pml_border_size = np.minimum(self.pml_border_size, cell_extent) - # one scalar each for temp fields, E_x, B_x, E_y, B_y, ... + # one scalar for each temporary field and E, B, J field component number_fields = 3 * 3 + number_of_temporary_field_slots # number of additional PML field components: when enabled, @@ -200,7 +200,7 @@ def memory_required_by_super_cell_fields( super_cell_extent: nptype.NDArray, number_atomic_states_by_atomic_physics_ion_species: list[int], number_electron_histogram_bins: int, - IPDactive: bool = True, + ipd_active: bool = True, ) -> int: """ Memory required for super cell fields on a specific device(GPU/CPU/...) 
@@ -217,7 +217,7 @@ def memory_required_by_super_cell_fields( @return unit: bytes """ - self.check_cell_extent(super_cell_extent * self.super_cell_size) + self._check_cell_extent(super_cell_extent * self.super_cell_size) number_cells_per_supercell = np.prod(self.super_cell_size) value_size = MemoryCalculator.get_value_size(self.precision) @@ -258,7 +258,7 @@ def memory_required_by_super_cell_fields( + size_time_step ) - if IPDactive: + if ipd_active: per_super_cell_memory += ( ipd_sum_weight_all + ipd_sum_weight_electrons @@ -350,7 +350,7 @@ def memory_required_by_random_number_generator( @return unit: bytes """ - self.check_cell_extent(cell_extent) + self._check_cell_extent(cell_extent) if generator_method == "XorMin": # bytes