@@ -1,149 +1,25 @@
-#
-# Example slurm.conf file. Please run configurator.html
-# (in doc/html) to build a configuration file customized
-# for your environment.
-#
-#
-# slurm.conf file generated by configurator.html.
-#
-# See the slurm.conf man page for more information.
-#
 ClusterName={{ openhpc_cluster_name }}
-SlurmctldHost={{ openhpc_slurm_control_host }}{% if openhpc_slurm_control_host_address is defined %}({{ openhpc_slurm_control_host_address }}){% endif %}
 
-#DisableRootJobs=NO
-#EnforcePartLimits=NO
-#EpilogSlurmctld=
-#FirstJobId=1
-#MaxJobId=67043328
-#GresTypes=
-#GroupUpdateForce=0
-#GroupUpdateTime=600
-#JobFileAppend=0
-#JobRequeue=1
-#JobSubmitPlugins=lua
-#KillOnBadExit=0
-#LaunchType=launch/slurm
-#Licenses=foo*4,bar
-#MailProg=/bin/mail
-#MaxJobCount=10000
-#MaxStepCount=40000
-#MaxTasksPerNode=512
-MpiDefault=none
-#MpiParams=ports=#-#
-#PluginDir=
-#PlugStackConfig=
-#PrivateData=jobs
-ProctrackType=proctrack/linuxproc # TODO: really want cgroup but needs cgroup.conf and workaround for CI
-#Prolog=
-#PrologFlags=
-#PrologSlurmctld=
-#PropagatePrioProcess=0
-#PropagateResourceLimits=
-#PropagateResourceLimitsExcept=
-#RebootProgram=
-SlurmctldPidFile=/var/run/slurmctld.pid
-SlurmctldPort=6817
-SlurmdPidFile=/var/run/slurmd.pid
-SlurmdPort=6818
-SlurmdSpoolDir=/var/spool/slurm # NB: not OpenHPC default!
-SlurmUser=slurm
-#SlurmdUser=root
-#SrunEpilog=
-#SrunProlog=
-StateSaveLocation={{ openhpc_state_save_location }}
-SwitchType=switch/none
-#TaskEpilog=
-#TaskPlugin=task/affinity
-#TaskProlog=
-#TopologyPlugin=topology/tree
-#TmpFS=/tmp
-#TrackWCKey=no
-#TreeWidth=
-#UnkillableStepProgram=
-#UsePAM=0
-#
-#
-# TIMERS
-#BatchStartTimeout=10
-#CompleteWait=0
-#EpilogMsgTime=2000
-#GetEnvTimeout=2
-#HealthCheckInterval=0
-#HealthCheckProgram=
-InactiveLimit=0
-KillWait=30
-#MessageTimeout=10
-#ResvOverRun=0
-MinJobAge=300
-#OverTimeLimit=0
-SlurmctldTimeout=300
-SlurmdTimeout=300
-#UnkillableStepTimeout=60
-#VSizeFactor=0
-Waittime=0
-#
-#
-# SCHEDULING
-#DefMemPerCPU=0
-#MaxMemPerCPU=0
-#SchedulerTimeSlice=30
-SchedulerType=sched/backfill
-SelectType=select/cons_tres
-SelectTypeParameters=CR_Core
-#
-#
-# JOB PRIORITY
-#PriorityFlags=
-PriorityType=priority/multifactor
-#PriorityDecayHalfLife=
-#PriorityCalcPeriod=
-#PriorityFavorSmall=
-#PriorityMaxAge=
-#PriorityUsageResetPeriod=
-#PriorityWeightAge=
-#PriorityWeightFairshare=
-#PriorityWeightJobSize=
-PriorityWeightPartition=1000
-#PriorityWeightQOS=
-PreemptType=preempt/partition_prio
-PreemptMode=SUSPEND,GANG
-#
-# LOGGING AND ACCOUNTING
-#AccountingStorageEnforce=0
-AccountingStorageHost={{ openhpc_slurm_accounting_storage_host }}
-{% if openhpc_slurm_accounting_storage_pass | default(false, true) %}
-AccountingStoragePass={{ openhpc_slurm_accounting_storage_pass }}
-{% endif %}
-AccountingStoragePort={{ openhpc_slurm_accounting_storage_port }}
-AccountingStorageType={{ openhpc_slurm_accounting_storage_type }}
-AccountingStorageUser={{ openhpc_slurm_accounting_storage_user }}
-#AccountingStoreFlags=
-#JobCompHost=
-JobCompLoc={{ openhpc_slurm_job_comp_loc }}
-#JobCompPass=
-#JobCompPort=
-JobCompType={{ openhpc_slurm_job_comp_type }}
-#JobCompUser=
-#JobContainerType=job_container/none
-JobAcctGatherFrequency={{ openhpc_slurm_job_acct_gather_frequency }}
-JobAcctGatherType={{ openhpc_slurm_job_acct_gather_type }}
+# PARAMETERS
+{% for k, v in openhpc_default_config | combine(openhpc_config) | items %}
+{% if v != "omit" %}{# allow removing items using setting key: null #}
+{% if k != 'SlurmctldParameters' %}{# handled separately due to openhpc_slurm_configless #}
+{{ k }}={{ v | join(',') if (v is sequence and v is not string) else v }}
+{% endif %}
+{% endif %}
+{% endfor %}
 
-# By default, SLURM will log to syslog, which is what we want
-SlurmctldSyslogDebug=info
-SlurmdSyslogDebug=info
-#SlurmSchedLogFile=
-#SlurmSchedLogLevel=
-#DebugFlags=
+{% set slurmctldparameters = ((openhpc_config.get('SlurmctldParameters', []) + (['enable_configless'] if openhpc_slurm_configless | bool else [])) | unique) %}
+{% if slurmctldparameters | length > 0 %}
+SlurmctldParameters={{ slurmctldparameters | join(',') }}
+{% endif %}
 
 # LOGIN-ONLY NODES
 # Define slurmd nodes not in partitions for login-only nodes in "configless" mode:
 {%if openhpc_login_only_nodes %}{% for node in groups[openhpc_login_only_nodes] %}
 NodeName={{ node }}
 {% endfor %}{% endif %}
 
-PropagateResourceLimitsExcept=MEMLOCK
-Epilog=/etc/slurm/slurm.epilog.clean
 
 # COMPUTE NODES
 {% for nodegroup in openhpc_nodegroups %}
@@ -183,8 +59,3 @@ PartitionName={{partition.name}} {{ '' -}} |
 Nodes={{ partition.get('nodegroups', [partition.name]) | map('regex_replace', '^', 'nodegroup_') | join(',') }} {{ '' -}}
 {{ partition.partition_params | default({}) | dict2parameters }}
 {% endfor %}{# openhpc_partitions #}
-
-{% if openhpc_slurm_configless | bool %}SlurmctldParameters=enable_configless{% endif %}
-
-
-ReturnToService=2
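
Notes on the new `# PARAMETERS` block: it replaces the long hand-maintained list of settings removed above by merging `openhpc_default_config` with the user-supplied `openhpc_config` (user values win), skipping any key whose value is the string `omit`, and joining list values with commas. `SlurmctldParameters` is excluded from the loop and handled separately. A minimal sketch of how this renders; the variable names come from the template, but the override values below are invented for illustration:

    # Hypothetical user overrides (illustrative values only):
    openhpc_config:
      SlurmctldDebug: debug       # override a scalar default
      SchedulerParameters:        # list values are joined with ','
        - defer
        - bf_continue
      ReturnToService: omit       # drop this key from the rendered file

    # Rendered fragment, assuming openhpc_default_config defines these keys:
    #   SlurmctldDebug=debug
    #   SchedulerParameters=defer,bf_continue
    #   (no ReturnToService line is emitted)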
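`SlurmctldParameters` gets special handling so that `enable_configless` is injected whenever `openhpc_slurm_configless` is true, rather than via the fixed `SlurmctldParameters=enable_configless` line previously appended at the bottom of the file; user-supplied values are preserved and the merged list is de-duplicated. Again with invented values:

    openhpc_slurm_configless: true
    openhpc_config:
      SlurmctldParameters:
        - idle_on_node_suspend

    # Renders as (user values first, then enable_configless, duplicates removed):
    #   SlurmctldParameters=idle_on_node_suspend,enable_configless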