58 changes: 58 additions & 0 deletions modifiers/allocation/modifier.py
@@ -297,6 +297,10 @@ def determine_allocation(self, v):
        if not v.n_threads_per_proc:
            v.n_threads_per_proc = 1

        # Calculate n_ranks_per_node if not explicitly set
        if not v.n_ranks_per_node and v.n_ranks and v.n_nodes:
            v.n_ranks_per_node = math.ceil(v.n_ranks / v.n_nodes)
Collaborator:
If PBS always requires setting n_ranks_per_node (in particular when n_ranks and n_nodes are set), I think that would best be done in pbs_instructions; based on reading it, I assume it is not required, in which case I think it might be best to skip this entirely. This function implicitly ensures that either n_nodes or n_ranks is set by the end of it, and the scheduler option functions tend to work with that; if it also sets n_ranks_per_node itself, it could over-specify the allocation (e.g. the potential mismatch from calling ceil).
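For example (illustrative numbers, not from this PR): with n_ranks = 10 and n_nodes = 4, the ceil gives n_ranks_per_node = 3, but n_nodes * n_ranks_per_node = 12 != 10, so the consistency check added in pbs_instructions below would reject values this function itself produced.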

Perhaps if n_nodes is set but not n_ranks/n_ranks_per_node, then I should be setting n_ranks_per_node = sys_cores_per_node in this function. I had assumed schedulers were doing that automatically; nearly all experiments in benchpark set n_ranks.
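A rough sketch of that suggestion (assuming sys_cores_per_node is available on v, which this diff does not confirm):

        # Hypothetical: fill ranks-per-node from the system core count when only
        # the node count was specified by the experiment.
        if v.n_nodes and not v.n_ranks and not v.n_ranks_per_node:
            v.n_ranks_per_node = v.sys_cores_per_node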


        # Final check, make sure the above arithmetic didn't result in an
        # unreasonable allocation request.
        for var, val in v.defined():
@@ -466,13 +470,67 @@ def pjm_instructions(self, v):
        v.batch_submit = "pjsub {execute_experiment}"
        v.allocation_directives = "\n".join(batch_directives)

    def pbs_instructions(self, v):
        batch_opts, cmd_opts = Allocation._init_batch_and_cmd_opts(v)

        if v.n_ranks and v.n_nodes and v.n_ranks_per_node:
            expected_ranks = v.n_nodes * v.n_ranks_per_node
            if v.n_ranks != expected_ranks:
Collaborator:

Might get a complaint here, given that above you are doing:

        if not v.n_ranks_per_node and v.n_ranks and v.n_nodes:
            v.n_ranks_per_node = math.ceil(v.n_ranks / v.n_nodes)

                raise ValueError(
                    f"Inconsistent rank specification: n_ranks ({v.n_ranks}) != "
                    f"n_nodes ({v.n_nodes}) * n_ranks_per_node ({v.n_ranks_per_node})"
                )

        if v.n_ranks:
            cmd_opts.append(f"-np {v.n_ranks}")

        if v.n_nodes:
            node_spec = f"nodes={v.n_nodes}"
            if v.n_ranks_per_node:
                node_spec += f":ppn={v.n_ranks_per_node}"

            if v.n_gpus and v.sys_gpus_per_node:
                if v.n_gpus > v.n_nodes * v.sys_gpus_per_node:
Collaborator:
This seems like a check I should have added to determine_allocation - would you mind moving this there?
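A minimal sketch of what that check could look like in determine_allocation (same names as the diff; the exact placement within that function is an assumption):

        # Hypothetical placement in determine_allocation: reject requests that
        # ask for more GPUs than the requested nodes can supply.
        if v.n_gpus and v.n_nodes and v.sys_gpus_per_node:
            if v.n_gpus > v.n_nodes * v.sys_gpus_per_node:
                raise ValueError(
                    f"Requested GPUs ({v.n_gpus}) exceeds available GPUs "
                    f"({v.n_nodes * v.sys_gpus_per_node}) on {v.n_nodes} nodes"
                )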

                    raise ValueError(
                        f"Requested GPUs ({v.n_gpus}) exceeds available GPUs "
                        f"({v.n_nodes * v.sys_gpus_per_node}) on {v.n_nodes} nodes"
                    )
                # Distribute GPUs as evenly as possible
                gpus_per_node = v.n_gpus // v.n_nodes
                if v.n_gpus % v.n_nodes > 0:
                    gpus_per_node += 1
                gpus_per_node = min(gpus_per_node, v.sys_gpus_per_node)
                node_spec += f":gpus={gpus_per_node}"
            elif v.n_gpus:
                raise ValueError(
                    "GPU allocation requested but sys_gpus_per_node not specified"
                )

            batch_opts.append(f"-l {node_spec}")

        if v.queue:
            batch_opts.append(f"-q {v.queue}")

        if v.timeout:
            batch_opts.append(f"-l walltime={TimeFormat.as_hhmmss(v.timeout)}")

        if v.bank:
            batch_opts.append(f"-A {v.bank}")

        batch_directives = list(f"#PBS {x}" for x in batch_opts)

        v.mpi_command = f"mpiexec {' '.join(cmd_opts)}"
        v.batch_submit = "qsub {execute_experiment}"
        v.allocation_directives = "\n".join(batch_directives)
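For illustration (hypothetical values, and ignoring whatever _init_batch_and_cmd_opts pre-populates): a request with n_nodes=2, n_ranks_per_node=8, n_ranks=16, and queue "batch" would come out roughly as

        #PBS -l nodes=2:ppn=8
        #PBS -q batch

with mpi_command set to "mpiexec -np 16" and batch_submit set to "qsub {execute_experiment}".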

    def determine_scheduler_instructions(self, v):
        handler = {
            "slurm": self.slurm_instructions,
            "flux": self.flux_instructions,
            "mpi": self.mpi_instructions,
            "lsf": self.lsf_instructions,
            "pjm": self.pjm_instructions,
            "pbs": self.pbs_instructions,
        }
        if v.scheduler not in handler:
            raise ValueError(