# this is an example inputs file for the test suite. You can use this
# as a template to create your own suite.
# IMPORTANT NOTE: do not use quotes "" for strings here.
# the items in [main] control the overall behavior of the suite,
# including location of the output directories
[main]
# the parent directory for all the test suite stuff. The benchmarks
# and run directories will live under here
testTopDir = /home/testing/castro-tests/
# the location of the web output. This does not have to be under
# testTopDir, and often will be put in a directory that is seen
# by the webserver
webTopDir = /home/www/Castro/test-suite/test-suite-gfortran/
# one of C_Src, F_Src, or AMReX. This determines whether the C++
# build system is used, the F90 build system is used, or the tests
# are standalone AMReX tests, respectively.
sourceTree = C_Src
# the number of simultaneous build jobs to run (passed to make via
# its -j flag)
# a descriptive name and subtitle. The name will prefix the output
# directories and appear on the webpages. The subtitle only shows up
# on the main webpage.
suiteName = Castro-SBU
sub_title = gfortran version
# do we add a link to the main web page that goes up to the parent
# directory (e.g., a link to "../"). This is useful if you have
# multiple test suite web pages all under a common parent directory.
goUpLink = 1
# should the main test webpage only include columns for tests that are
# defined in the inputs file? This can be used to filter out any
# old test names that might exist in older test runs but are no
# longer actively run.
# the names of the compilers for C++ and Fortran. These should be
# names that the AMReX build system can interpret, as these variables
# are passed directly to the make command.
COMP = g++
FCOMP = gfortran
# any additional options to add to the C++ make command
add_to_c_make_command = TEST=TRUE
# after the test is run and the comparisons are made, do we keep the
# plotfiles around? If this is 1, then any plot or checkpoint files,
# except the one that was used for the comparison, will be deleted.
# Otherwise, all the output files will be tar/gzipped.
purge_output = 1
# each test suite invocation produces a test summary page with a table
# that lists each test that was run, with columns giving details about
# the run (like # of processors, etc.). These variables allow you
# to add information to the table: the suite greps the job_info file
# in the plotfile for the specified keyword followed by a colon, and
# any text after the colon is written to the summary table
summary_job_info_field1 = EOS
summary_job_info_field2 = network
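# for example, if the plotfile's job_info file contained a line like
# (hypothetical contents -- the actual fields depend on your application):
#   EOS: helmholtz
# then "helmholtz" would be written to the EOS column of the summary table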
# MPIcommand should use the placeholders:
# @host@ to indicate where to put the hostname to run on
# @nprocs@ to indicate where to put the number of processors
# @command@ to indicate where to put the command to run
#
# only tests with useMPI = 1 will run in parallel
# nprocs is problem dependent and specified in the individual problem
# sections.
MPIcommand = mpiexec -n @nprocs@ @command@
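# as an illustration, with the command above and numprocs = 4, a test
# would be launched roughly as (hypothetical executable name):
#   mpiexec -n 4 ./Castro2d.gnu.MPI.ex inputs-test1-helm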
# default_branch is compared against the branch we request each git
# repo to be on (which can be set on a repo-by-repo basis). If any of
# the repo branches differs from the default branch, then an asterisk
# is placed next to the date on the main webpage to indicate that a
# non-default branch was used.
default_branch = development
# should we send an e-mail if any tests fail? If so, where and what
# should the body of the email be?
sendEmailWhenFail = 1
emailTo = [email protected]
emailBody = Check http://bender.astro.sunysb.edu/Castro/test-suite/test-suite-gfortran/ for details
# some research groups use slack for communication. If you set up a
# slack webhook (see the slack documentation for this), then you can
# have the suite post directly to a slack channel when the test begins
# and ends (including the # of failures). You need to put the webhook
# URL in a plaintext file somewhere that is readable by the suite (for
# security reasons, we don't put it in this inputs file, in case this
# is under version control).
slack_post = 1
slack_webhookfile = /home/zingale/.slack.webhook
slack_channel = "#castro"
slack_username = "bender"
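# the webhook file itself should just hold the URL, e.g. a single line
# of the form (placeholder value):
#   https://hooks.slack.com/services/T0000000/B0000000/XXXXXXXXXXXX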
# Next we specify the source code repositories. Each git repo is
# given its own section.
# There will always be an "AMReX" repo.
[AMReX]
# dir is the full path to the AMReX git repo you will do the testing
# on. This should be a separate directory from the one you do
# day-to-day work in, to ensure that there are no conflicts when the
# test suite does git pulls.
dir = /home/testing/castro-tests/AMReX/
# branch is the git branch that you want AMReX to be on for the test
# suite run. Usually this is development or master
branch = development
# you will usually have a source section -- this corresponds to your
# application code. This has the "dir" and "branch" options just
# like for AMReX.
[source]
dir = /home/testing/castro-tests/Castro/
branch = development
# any additional git repositories needed either to build the
# application code or to provide the problem directories that we
# actually do the compilation in. The names of these sections will
# always start with "extra-". In addition to "dir" and "branch"
# for the repo, there are two additional options here, "comp_string"
# and "build". If "build = 1" is present, then the test suite will
# treat this repo as hosting problem directories that we compile
# in. Otherwise, it will treat the repo as an additional source
# directory that the application code requires for building.
[extra-Microphysics]
dir = /home/testing/castro-tests/Microphysics
branch = development
# here comp_string sets a variable that will be used on the make line.
# The idea is to list any environment variables that the build system
# will need to interpret and access this additional source directory.
# Here we set the environment variable MICROPHYSICS_HOME to point to
# the full path of this additional source directory (this is
# substituted in for the keyword "@self@" here).
comp_string = MICROPHYSICS_HOME=@self@
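# as a sketch, given the dir above, the test suite's make invocation
# would then include:
#   MICROPHYSICS_HOME=/home/testing/castro-tests/Microphysics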
[extra-wdmerger]
# this indicates that this directory will host test problems that
# the suite will compile in
build = 1
dir = /home/testing/castro-tests/wdmerger/
branch = development
# here comp_string sets the environment variable WDMERGER_HOME to
# point to this extra build repo's full path and sets another
# environment variable CASTRO_HOME to point to the full path of the
# main source directory (indicated in the [source] section). This is
# needed in this case to ensure that the alternate build directories
# contained here still see the main source directory that the suite is
# using.
comp_string = CASTRO_HOME=@source@ WDMERGER_HOME=@self@
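# as a sketch, given the dir settings above, this would expand on the
# make line to:
#   CASTRO_HOME=/home/testing/castro-tests/Castro/ WDMERGER_HOME=/home/testing/castro-tests/wdmerger/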
# next come the problem setups. Each problem is given its own section.
# The name of the problem is given in the section heading -- this is how
# the test suite will refer to the problem. As an example, here's a
# problem definition template
[sample_problem]
# extra_build_dir is set to the name of the repo (other than source)
# that contains the problem's build directory. This needs to be the
# name of a repo defined in an "extra-" section that has "build = 1"
# set as an option.
extra_build_dir =
# buildDir is the path, relative to the build source directory (the
# one specified in the [source] block by default), of the directory
# in which we build the test problem.
buildDir = Exec/Sod_stellar/
# inputFile is the inputs file that is given to the executable when
# it is run
inputFile = inputs-test1-helm
# some C++ AMReX applications require a probin file in addition to the
# inputs file. Specify that here
probinFile = probin-test1-helm
# a script to run in place of the executable. The script will be copied
# to the run directory and executed with the given arguments.
run_as_script =
script_args =
# link?File and aux?File (where ? = any nonnegative integer) give the
# names of any additional files that are needed to run the executable
# (e.g. initial models). These are specified relative to the problem's
# build directory. link files are soft-linked into the run directory;
# aux files are copied into the run directory.
link1File = helm_table.dat
aux1File = spherical.hse.640
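# additional files are listed by incrementing the integer, e.g.
# (hypothetical file names):
#   link2File = another_table.dat
#   aux2File = another.hse.1280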
# the dimensionality of the problem
dim = 2
# do we compile in debug mode or not?
debug = 1
# do visualization of the last plotfile output? This only works for
# 2- and 3-d. visVar gives the name of the variable to visualize;
# the output will be on the test's webpage.
doVis = 0
visVar = "Temp"
# is this a compile-only test? (i.e. just ensure the code compiles,
# but don't run anything).
compileTest = 0
# is this a restart test? If so, we run to completion, then restart
# from the checkpoint file number given by restartFileNum, run to
# completion again, and compare the two outputs.
restartTest = 0
restartFileNum = 10
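# e.g. with restartFileNum = 10, the suite would restart from the 10th
# checkpoint (a file named like chk00010, assuming the usual AMReX
# checkpoint naming)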
# is this a self test? Self-tests don't have a benchmark that they
# compare to. Instead the executable determines if the test was
# successful and reports a particular string upon success. This
# string should be specified as stSuccessString. The test suite will
# look for that string to determine if the test passed.
selfTest =
stSuccessString =
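# as a sketch, a self test whose executable prints a pass message on
# success might use (hypothetical success string -- your code defines
# its own):
#   selfTest = 1
#   stSuccessString = regression test passed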
# MPI test? and number of processors for MPI
useMPI = 1
numprocs = 4
# OpenMP test? and number of threads to use
useOMP = 1
numthreads = 4
# OpenACC test?
acc = 1
# any additional variable definitions that need to be added to the problem's
# make command
addToCompileString = GENERAL_NET_INPUTS="${CASTRO_HOME}/Microphysics/networks/general_null/ignition.net"
# some problems have analysis routines (e.g. to compare to an analytic
# solution). These options control what script to run, what inputs it
# takes, and what output is produced.
analysisRoutine = Exec/Sod_stellar/testsuite_analysis/test1-helm.py
analysisMainArgs = source_dir
analysisOutputImage = Sod_stellar-test1-2d.png
# explicit output file to compare with -- exactly as it will be
# written. No prefixing with the test name will be done
outputFile =
# explicit output file to do the comparison with -- this is assumed to
# be prefixed with the test name when output by the code at runtime,
# e.g. test_plt00100. Normally this is not specified and the suite
# uses the last plotfile output by the test.
compareFile =
# directory or file to do a plain text diff on (recursive, if a
# directory). For instance, the global diagnostic file output
# by the simulation.
diffDir =
# options to use with the diff command for the diffDir comparison
diffOpts =
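# e.g. to ignore lines matching a pattern during the diff (hypothetical
# pattern; -I is GNU diff's ignore-matching-lines option):
#   diffOpts = -I wall_time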
# any runtime parameters to append to the test's command line
runtime_params =