diff --git a/app/get_data/bin/get_GM_data.sh b/app/get_data/bin/get_GM_data.sh
index ac19552..6c74725 100755
--- a/app/get_data/bin/get_GM_data.sh
+++ b/app/get_data/bin/get_GM_data.sh
@@ -2,41 +2,53 @@
fcst_date=$1
datadir=$2
+max_lead=$3
+accum_period=$4
+
+# Remove T and Z if present to normalize date format
+fcst_date=${fcst_date//T/}
+fcst_date=${fcst_date//Z/}
year=${fcst_date:0:4}
date=${fcst_date:0:8}
-hour=${fcst_date:9:2}
+hour=${fcst_date:8:2}
+
+# Create formatted version with T and Z for output filenames
+save_current_date=${date}T${hour}00Z
+
+echo "year: $year"
+echo "date: $date"
+echo "hour: $hour"
+echo "save_current_date: $save_current_date"
### Determine lead times based on forecast hour ###
if [[ "$hour" == "00" || "$hour" == "12" ]]; then
- leads_6h_end='72'
- leads_24h_end='144'
+ leads_end=$max_lead
else
- leads_6h_end='60'
- leads_24h_end='48'
+ leads_end=$max_lead
fi
## Define times for global update analysis - only 000 and 006 available ##
analysis_runs=("000" "006")
-### Get 6hr leads ###
-leads_6h=""
-for ((i=0; i<=leads_6h_end; i+=6)); do
- leads_6h="${leads_6h}${i} "
-done
-leads_6h=$(echo $leads_6h) # Remove trailing space if needed
-
-### Get 24hr leads ###
-leads_24h=""
-for ((i=0; i<=leads_24h_end; i+=24)); do
- leads_24h="${leads_24h}${i} "
-done
-leads_24h=$(echo $leads_24h) # Remove trailing space if needed
-
+### Get leads ###
+# Special case: if hour is 12 and accum_period is 24, use 12, 36, 60... instead of 0, 24, 48...
+if [[ "$hour" == "12" && "$accum_period" == "24" ]]; then
+ leads_h=""
+ for ((i=12; i<=leads_end; i+=accum_period)); do
+ leads_h="${leads_h}${i} "
+ done
+else
+ leads_h=""
+ for ((i=0; i<=leads_end; i+=accum_period)); do
+ leads_h="${leads_h}${i} "
+ done
+fi
+leads_h=$(echo $leads_h) # Remove trailing space if needed
#################### UPDATE ANALYSES ##########################
-
+## 0 & 6 used here as update analyses are only available at 000 and 006 ##
mass-pull () {
local analysis=$1
touch query
@@ -47,7 +59,7 @@ begin
end
EOF
- moo select -I query moose:/opfc/atm/global/prods/${year}.pp/ ${datadir}/6_hour/${fcst_date}_gl-up_${analysis}.pp
+ moo select -I query moose:/opfc/atm/global/prods/${year}.pp/ ${datadir}/6_hour/${save_current_date}_gl-up_${analysis}.pp
rm query
}
@@ -55,7 +67,7 @@ for analysis in "${analysis_runs[@]}"; do
mass-pull "$analysis"
done
-#################### 6 hrly ###########################
+#################### PER ACCUM PERIOD ###########################
mass-pull () {
touch query
@@ -67,69 +79,33 @@ cat >query < ${datadir}/6_hour/${fcst_date}_gl-mn_${accum}.pp"
-cat $list_of_files > ${datadir}/6_hour/${fcst_date}_gl-mn_${accum}.pp
-
-##################### 24 hrly ##########################
-
-mass-pull1 () {
-touch query1
-cat >query1 < ${datadir}/24_hour/${fcst_date}_gl-mn_${accum}.pp"
-cat $list_of_files > ${datadir}/24_hour/${fcst_date}_gl-mn_${accum}.pp
+accum=$(printf "%03d" $accum_period)
+echo "cat $list_of_files > ${datadir}/${accum_period}_hour/${save_current_date}_gl-mn_${accum}.pp"
+cat $list_of_files > ${datadir}/${accum_period}_hour/${save_current_date}_gl-mn_${accum}.pp
############################## FOR TRIALS #########################################
diff --git a/app/get_data/rose-app.conf b/app/get_data/rose-app.conf
index 8c418f7..a3865ae 100755
--- a/app/get_data/rose-app.conf
+++ b/app/get_data/rose-app.conf
@@ -1,2 +1,2 @@
[command]
-default=set -x; get_GM_data.sh $CYLC_TASK_CYCLE_POINT $DATADIR
\ No newline at end of file
+default=set -x; get_GM_data.sh $CYLC_TASK_CYCLE_POINT $DATADIR $MAX_LEAD $ACCUM_PERIOD
\ No newline at end of file
diff --git a/app/get_gpm/bin/calc_gpm_accumulation.py b/app/get_gpm/bin/calc_gpm_accumulation.py
index 288def8..2518c6f 100755
--- a/app/get_gpm/bin/calc_gpm_accumulation.py
+++ b/app/get_gpm/bin/calc_gpm_accumulation.py
@@ -42,6 +42,8 @@ def parse_args():
help="Maximum lead time")
parser.add_argument("-p", "--parallel", action="store_true",
help="Enable parallelism")
+ parser.add_argument("--cycling_on", type=str,
+ help="Flag to indicate running on VT or DT within cylc")
# Parse the command line.
args = parser.parse_args()
@@ -190,6 +192,7 @@ def main():
## Create output directory if it doesn't exist
period_outdir = os.path.join(out_dir, f"{acc_period}_hour_gpm")
os.makedirs(period_outdir, exist_ok=True)
+    cycling_on = args.cycling_on # flag to indicate running on VT or DT within cylc
if args.cutout:
cutout = args.cutout
@@ -200,13 +203,27 @@ def main():
cycle_point = datetime.datetime.strptime(args.cycle_point, '%Y%m%dT%H%MZ')
lead = args.max_lead
- START_ACCUM_DATE_DT = cycle_point
- START_ACCUM_DATE_STR = cycle_point.strftime('%Y%m%d%H')
- print(f"START_ACCUM_DATE: {START_ACCUM_DATE_STR}")
- END_ACCUM_DATE_DT = cycle_point + datetime.timedelta(hours=lead)
- END_ACCUM_DATE_STR = END_ACCUM_DATE_DT.strftime('%Y%m%d%H')
- print(f"END_ACCUM_DATE: {END_ACCUM_DATE_STR}")
- i = START_ACCUM_DATE_DT
+ print(f"We are cycling on {cycling_on}")
+
+ if cycling_on == 'DT':
+ START_ACCUM_DATE_DT = cycle_point
+ START_ACCUM_DATE_STR = cycle_point.strftime('%Y%m%d%H')
+ print(f"START_ACCUM_DATE: {START_ACCUM_DATE_STR}")
+ END_ACCUM_DATE_DT = cycle_point + datetime.timedelta(hours=lead)
+ END_ACCUM_DATE_STR = END_ACCUM_DATE_DT.strftime('%Y%m%d%H')
+ print(f"END_ACCUM_DATE: {END_ACCUM_DATE_STR}")
+ i = START_ACCUM_DATE_DT
+ elif cycling_on == 'VT':
+ END_ACCUM_DATE_DT = cycle_point
+ END_ACCUM_DATE_STR = cycle_point.strftime('%Y%m%d%H')
+ print(f"END_ACCUM_DATE: {END_ACCUM_DATE_STR}")
+ START_ACCUM_DATE_DT = cycle_point - datetime.timedelta(hours=acc_period)
+ START_ACCUM_DATE_STR = START_ACCUM_DATE_DT.strftime('%Y%m%d%H')
+ print(f"START_ACCUM_DATE: {START_ACCUM_DATE_STR}")
+ i = START_ACCUM_DATE_DT
+ else:
+ print("Error: cycling_on flag must be set to either VT or DT")
+ sys.exit(1)
while i < END_ACCUM_DATE_DT:
diff --git a/app/get_gpm/bin/gpm_vt_wrapper.sh b/app/get_gpm/bin/gpm_vt_wrapper.sh
new file mode 100755
index 0000000..7b4c67f
--- /dev/null
+++ b/app/get_gpm/bin/gpm_vt_wrapper.sh
@@ -0,0 +1,46 @@
+#!/bin/bash -l
+
+set -x
+module load scitools/production-os47-2
+
+export PYTHONPATH=${PYTHONPATH}:${CYLC_SUITE_DEF_PATH}/app/${ROSE_TASK_APP}/bin
+echo $PYTHONPATH
+echo $ROSE_TASK_APP
+echo $CYLC_TASK_NAME
+
+# CYLC_TASK_CYCLE_POINT is the model run (initialisation date/time)
+echo $CYLC_TASK_CYCLE_POINT
+echo $OUTPUT_DATA
+GPM_DATA_DIR="/data/users/gpm_imerg"
+GPM_OBS_TYPE="GPM_NRTlate"
+
+# 6-hour accumulation: from (cycle point - 6h) to cycle point
+START_ACCUM_PERIOD=$(isodatetime -u $CYLC_TASK_CYCLE_POINT --offset -PT6H --print-format=%Y%m%d%H)
+echo $START_ACCUM_PERIOD
+END_ACCUM_PERIOD=$CYLC_TASK_CYCLE_POINT
+
+echo "6-hour accumulation: $START_ACCUM_PERIOD to $END_ACCUM_PERIOD"
+python ${CYLC_SUITE_DEF_PATH}/app/${ROSE_TASK_APP}/bin/og_calc_gpm_accumulation.py \
+ --outdir $OUTPUT_DATA \
+ --datadir $GPM_DATA_DIR \
+ --obs $GPM_OBS_TYPE \
+ --accum_period 6 \
+ --start_date $START_ACCUM_PERIOD \
+ --end_date $END_ACCUM_PERIOD \
+ --cycle_point $CYLC_TASK_CYCLE_POINT
+
+# 24-hour accumulation: from (cycle point - 24h) to cycle point
+START_ACCUM_PERIOD=$(isodatetime -u $CYLC_TASK_CYCLE_POINT --offset -PT24H --print-format=%Y%m%d%H)
+END_ACCUM_PERIOD=$CYLC_TASK_CYCLE_POINT
+
+echo "24-hour accumulation: $START_ACCUM_PERIOD to $END_ACCUM_PERIOD"
+python ${CYLC_SUITE_DEF_PATH}/app/${ROSE_TASK_APP}/bin/og_calc_gpm_accumulation.py \
+ --outdir $OUTPUT_DATA \
+ --datadir $GPM_DATA_DIR \
+ --obs $GPM_OBS_TYPE \
+ --accum_period 24 \
+ --start_date $START_ACCUM_PERIOD \
+ --end_date $END_ACCUM_PERIOD \
+ --cycle_point $CYLC_TASK_CYCLE_POINT
+
+
diff --git a/app/get_gpm/rose-app.conf b/app/get_gpm/rose-app.conf
index 8b09360..5a62c88 100644
--- a/app/get_gpm/rose-app.conf
+++ b/app/get_gpm/rose-app.conf
@@ -1,2 +1,2 @@
[command]
-default=set -x; moxie-run calc_gpm_accumulation.py --datadir $GPM_DATA_DIR --obs $GPM_OBS_TYPE --accum_period $ACCUM_PERIOD --cycle_point $CYLC_TASK_CYCLE_POINT --max_lead $MAX_LEAD --outdir $OUTPUT_DATA
\ No newline at end of file
+default=set -x; moxie-run calc_gpm_accumulation.py --datadir $GPM_DATA_DIR --obs $GPM_OBS_TYPE --accum_period $ACCUM_PERIOD --cycle_point $CYLC_TASK_CYCLE_POINT --max_lead $MAX_LEAD --outdir $OUTPUT_DATA --cycling_on $CYCLING_ON
\ No newline at end of file
diff --git a/app/prep_gpm/bin/prep_gpm_accumulation.py b/app/prep_gpm/bin/prep_gpm_accumulation.py
new file mode 100755
index 0000000..0cbcfac
--- /dev/null
+++ b/app/prep_gpm/bin/prep_gpm_accumulation.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+import argparse
+import datetime
+import dateutil.rrule
+import os
+import sys
+import numpy as np
+import iris
+import iris.cube
+import cf_units
+
+def parse_args():
+ '''
+ Processes and returns command line arguments
+ '''
+ parser = argparse.ArgumentParser(description='Process forecast precipitation data ready for SEEPS calculation')
+ name = parser.prog
+
+ # Required arguments
+ parser.add_argument("--datadir", metavar='data_directory',
+ help="Directory containing GPM data")
+ parser.add_argument("--obs", type=str,
+                        help="Observation type (e.g. GPM, GPM_NRTlate)")
+ parser.add_argument("-o", "--outdir", default=os.getcwd(),
+ help="Directory to save output cubes (default: $PWD)")
+ parser.add_argument("--accum_period", type=int,
+ help="Accumulation period to sum precipitation over (in hours)",
+ dest='accum_period')
+ parser.add_argument("--cutout",
+ nargs="*",
+ type=float,
+ default=None,
+ help=("Coordinates of subregion to cut out in the "
+ "form [min lon, max lon, min lat, max lat]."),
+ dest="cutout")
+ parser.add_argument("-v", "--verbose", default=0,
+ help="Produce verbose output. Values 0-50")
+ parser.add_argument("--cycle_point",
+ help="Cycle point from workflow")
+ parser.add_argument("--max_lead",
+ type=int,
+ help="Maximum lead time")
+ parser.add_argument("-p", "--parallel", action="store_true",
+ help="Enable parallelism")
+ parser.add_argument("--cycling_on", type=str,
+ help="Flag to indicate running on VT or DT within cylc")
+
+ # Parse the command line.
+ args = parser.parse_args()
+
+ if not args.datadir:
+ raise argparse.ArgumentTypeError("Must specify a data source directory.")
+
+ if not args.obs:
+ raise argparse.ArgumentTypeError("Must specify an observation type.")
+
+ if not args.cycle_point:
+ raise argparse.ArgumentTypeError("Must specify a cycle_point.")
+
+ return args
+
+def insert_datetime(filename, date_time):
+ '''
+ FUNCTION FROM RMED Toolbox
+ Inserts a datetime into a file name containing date formatting characters.
+
+ Arguments:
+
+ * **filename** - the name of a file. If this contains any of the special \
+ date formatting characters
+
+ * %Y - 4-digit year
+ * %m - 2-digit month
+ * %d - 2-digit day
+ * %H - 2-digit hour
+ * %M - 2-digit minute
+
+ then these are replaced with numeric values derived from the components \
+ of the supplied :class:`datetime.datetime` object.
+ * **date_time** - a :class:`datetime.datetime` object specifiying the \
+ datetime to insert in the given filename.
+
+ Returns the input filename with date formatting characters replaced by \
+ the appropriate components of date_time.
+ '''
+ filename = filename.replace("%Y", "{0:04d}".format(date_time.year))
+ filename = filename.replace("%m", "{0:02d}".format(date_time.month))
+ filename = filename.replace("%d", "{0:02d}".format(date_time.day))
+ filename = filename.replace("%H", "{0:02d}".format(date_time.hour))
+ filename = filename.replace("%M", "{0:02d}".format(date_time.minute))
+
+ return filename
+
+def _increment_dt(start_datetime, end_datetime, interval):
+ '''
+ Increment datetime by given time interval (limited to integer hours)
+ '''
+ date_time = start_datetime
+ while date_time <= end_datetime:
+ yield min(date_time, end_datetime)
+ date_time += datetime.timedelta(hours=interval)
+
+def get_data(start_date, end_date, data_dir, gpm_type, accum_period):
+ '''
+ Retrieve requested GPM data type from internal MO netCDF-stored files.
+ '''
+ frequency_in_hours = 0.5
+ gpm_frames_per_period = int(accum_period / frequency_in_hours)
+ print(gpm_frames_per_period)
+ num_periods = (end_date - start_date) // datetime.timedelta(hours=accum_period)
+ #num_periods = num_periods.seconds//3600
+ print(num_periods)
+
+ # get the first end accumulation date/time
+ end_date_0 = start_date + datetime.timedelta(hours=accum_period)
+ # generate start and end accumulation datetimes
+ start_accumulations = (_increment_dt(start_date, end_date, accum_period) for x in range(num_periods))
+ end_accumulations = (_increment_dt(end_date_0, end_date, accum_period) for x in range(num_periods))
+
+ print(gpm_type)
+ if gpm_type == 'GPM':
+ gpm_type = 'production'
+ data_dir = os.path.join(data_dir, "production")
+ gpm_filename = f"gpm_imerg_production_V???_%Y%m%d.nc"
+ elif gpm_type == 'GPM_NRTlate':
+ gpm_type = 'NRTlate'
+ data_dir = os.path.join(data_dir, 'NRTlate', "V???")
+ gpm_filename = f"gpm_imerg_NRTlate_V???_%Y%m%d.nc"
+ else:
+ raise NotImplementedError("Can't currently process that category of GPM data: {}".format(gpm_type))
+
+ first_day = start_date.replace(hour=0, minute=0, second=0)
+ last_day = end_date.replace(hour=23, minute=59, second=59)
+
+ gpm_cubes = iris.cube.CubeList()
+ for this_day in dateutil.rrule.rrule(dateutil.rrule.HOURLY,
+ interval=24,
+ dtstart=first_day,
+ until=last_day):
+ this_year = this_day.year
+ this_gpm_filename = os.path.join(data_dir, str(this_year), gpm_filename)
+ this_gpm_file = insert_datetime(this_gpm_filename, this_day)
+ print("Loading {}...".format(this_gpm_file))
+ try:
+ gpm_cube = iris.load_cube(this_gpm_file)
+ except OSError:
+ continue
+ gpm_cubes.append(gpm_cube)
+
+ # now concatenate cubes together (should only be time axis differing at previous step)
+ gpm_cube = gpm_cubes.concatenate_cube()
+ print("Accumulating precipitation over required interval, {}-hours, "
+ "and date ranges {}-{}...".format(accum_period, start_date, end_date))
+
+ # now accumulate data over desired time ranges
+ gpm_acc = iris.cube.CubeList()
+ for start_dt, end_dt in zip(next(start_accumulations), next(end_accumulations)):
+ # TODO: change this to use bounds at some point
+ min_daterange = iris.Constraint(time=lambda cell: cell.bound[0] >= start_dt)
+ max_daterange = iris.Constraint(time=lambda cell: cell.bound[1] < end_dt)
+ time_limited_gpm_cube = gpm_cube.extract(min_daterange & max_daterange)
+
+ # set values less than zero to missing data
+ time_limited_gpm_cube.data[(time_limited_gpm_cube.data < 0)] = np.nan
+
+ # GPM fields are in mm/hr units for each half-hourly field
+ gpm_sum = time_limited_gpm_cube.collapsed('time', iris.analysis.SUM) / 2.
+ gpm_sum.rename('precipitation amount')
+ gpm_sum.units = cf_units.Unit('mm')
+ print(gpm_sum)
+ gpm_acc.append(gpm_sum)
+
+ print(gpm_acc)
+ for cube in gpm_acc:
+ print(cube)
+
+ gpm_acc = gpm_acc.merge_cube()
+ print(gpm_acc)
+
+ return gpm_acc
+
+def main():
+ '''
+ Create pseudo-accumulations over requested time period and desired
+ sub-area, if supplied.
+ '''
+
+ #First, deal with arguments
+ args = parse_args()
+ out_dir = args.outdir
+ acc_period = args.accum_period
+ ## Create output directory if it doesn't exist
+ period_outdir = os.path.join(out_dir, f"{acc_period}_hour_gpm")
+ os.makedirs(period_outdir, exist_ok=True)
+    cycling_on = args.cycling_on # flag to indicate running on VT or DT within cylc
+
+ if args.cutout:
+ cutout = args.cutout
+ else:
+ cutout = None
+ data_dir = args.datadir
+ obstype = args.obs
+ cycle_point = datetime.datetime.strptime(args.cycle_point, '%Y%m%dT%H%MZ')
+ lead = args.max_lead
+
+ print(f"We are cycling on {cycling_on}")
+
+ if cycling_on == 'DT':
+ print("hello from DT")
+ START_ACCUM_DATE_DT = cycle_point - datetime.timedelta(hours=lead)
+ START_ACCUM_DATE_STR = START_ACCUM_DATE_DT.strftime('%Y%m%d%H')
+ print(f"START_ACCUM_DATE: {START_ACCUM_DATE_STR}")
+ END_ACCUM_DATE_DT = cycle_point
+ END_ACCUM_DATE_STR = END_ACCUM_DATE_DT.strftime('%Y%m%d%H')
+ print(f"END_ACCUM_DATE: {END_ACCUM_DATE_STR}")
+ i = START_ACCUM_DATE_DT
+ elif cycling_on == 'VT':
+ END_ACCUM_DATE_DT = cycle_point
+ END_ACCUM_DATE_STR = cycle_point.strftime('%Y%m%d%H')
+ print(f"END_ACCUM_DATE: {END_ACCUM_DATE_STR}")
+ START_ACCUM_DATE_DT = cycle_point - datetime.timedelta(hours=acc_period)
+ START_ACCUM_DATE_STR = START_ACCUM_DATE_DT.strftime('%Y%m%d%H')
+ print(f"START_ACCUM_DATE: {START_ACCUM_DATE_STR}")
+ i = START_ACCUM_DATE_DT
+ else:
+ print("Error: cycling_on flag must be set to either VT or DT")
+ sys.exit(1)
+
+ while i < END_ACCUM_DATE_DT:
+
+ sdate = i
+ edate = i + datetime.timedelta(hours=acc_period)
+ print(f"sdate, edate: {sdate, edate}")
+
+ # fetch gpm data and sum over required time period
+ gpm_cube = get_data(sdate, edate, data_dir, obstype, acc_period)
+ print(gpm_cube)
+ print("After fetching data...")
+
+ # extract over subregion, if required
+ if cutout:
+ print("Trimming data to sub-region {}".format(cutout))
+ lons = (cutout[0], cutout[1])
+ lats = (cutout[2], cutout[3])
+ gpm_cube = gpm_cube.intersection(longitude=lons, latitude=lats)
+ else:
+ print("No cutout requested. Using global data!")
+
+ # now save cube to netCDF
+ for this_time in gpm_cube.slices_over('time'):
+ time_coord = this_time.coord('time')
+ slice_time = time_coord.units.num2date(time_coord.bounds[-1][-1])
+ start_acc_time = time_coord.units.num2date(time_coord.bounds[-1][0]).strftime('%Y%m%d%H')
+ end_acc_time = (slice_time + datetime.timedelta(seconds=1)).strftime('%Y%m%d%H')
+ print(start_acc_time)
+ print(end_acc_time)
+ outf = os.path.join(period_outdir,'gpm_{}_{}.nc'.format(start_acc_time, end_acc_time))
+ print("Saving to {} ...".format(outf))
+ iris.save(this_time, outf, fill_value=np.nan)
+
+ # Move on to next 6h period
+ i = i + datetime.timedelta(hours=acc_period)
+ print(f"onto next i.. {i}")
+
+if __name__ == "__main__":
+ #iris.FUTURE.save_split_attrs = True
+ main()
diff --git a/app/prep_gpm/rose-app.conf b/app/prep_gpm/rose-app.conf
new file mode 100644
index 0000000..3d1f8d3
--- /dev/null
+++ b/app/prep_gpm/rose-app.conf
@@ -0,0 +1,2 @@
+[command]
+default=set -x; moxie-run prep_gpm_accumulation.py --datadir $GPM_DATA_DIR --obs $GPM_OBS_TYPE --accum_period $ACCUM_PERIOD --cycle_point $CYLC_TASK_CYCLE_POINT --max_lead $MAX_LEAD --outdir $OUTPUT_DATA --cycling_on $CYCLING_ON
\ No newline at end of file
diff --git a/app/prep_prev_fcsts/bin/get_GM_data.sh b/app/prep_prev_fcsts/bin/get_GM_data.sh
new file mode 100755
index 0000000..7a550c2
--- /dev/null
+++ b/app/prep_prev_fcsts/bin/get_GM_data.sh
@@ -0,0 +1,154 @@
+#!/usr/bin/bash -l
+
+input_date=$1
+datadir=$2
+max_lead=$3
+accum_period=$4
+
+x=60
+
+# Adjust accum_period if it's 24
+if [[ "$accum_period" == "24" ]]; then
+ x=144
+fi
+
+# This is offsetting BACKWARD from input_date, then FORWARD by accum_period
+# For 6hr at 20250126T0600Z:
+# fcst_date = 20250126T0600Z - 60H = 20250123T1800Z (WRONG!)
+# input_date = 20250126T0600Z + 6H = 20250126T1200Z
+# This doesn't make sense for your use case
+
+# BETTER APPROACH: Don't offset input_date forward
+fcst_date=$(isodatetime -u $input_date --offset -PT${max_lead}H --print-format=%Y%m%d%H%M)
+# Keep input_date as-is (it's already the valid time you want)
+# input_date stays as input_date
+
+# Loop from fcst_date to input_date at intervals of accum_period
+current_date=$fcst_date
+echo "Getting GM data from ${fcst_date} to ${input_date} at ${accum_period}-hour intervals."
+
+while [[ $current_date -le ${input_date//[TZ]/} ]]; do
+
+ year=${current_date:0:4}
+ date=${current_date:0:8}
+ hour=${current_date:8:2}
+
+ echo "hour: $hour"
+
+ echo isodatetime -u $current_date
+
+ save_current_date=${date}T${hour}00Z
+ echo "Save current date is: ${save_current_date}"
+
+ ### Determine lead times based on forecast hour ###
+
+ if [[ "$hour" == "00" || "$hour" == "12" ]]; then
+ leads_end=$max_lead
+ else
+ leads_end=$max_lead
+ fi
+
+ ## Define times for global update analysis - only 000 and 006 available ##
+ analysis_runs=("000" "006")
+
+ i=0
+
+ if [[ "$accum_period" == "24" && "$hour" == "12" ]]; then
+ echo "Accumulation period is 24 and hour is 12, starting leads at 12"
+ i=$hour
+ fi
+ ### Get leads based on accum_period ###
+ leads=""
+ for ((; i<=leads_end; i+=accum_period)); do
+ leads="${leads}${i} "
+ done
+ leads=$(echo $leads) # Remove trailing space if needed
+
+
+ #################### UPDATE ANALYSES ##########################
+
+ mass-pull () {
+ local analysis=$1
+ touch query
+ cat >query <query < ${datadir}/${accum_period}_hour/${save_current_date}_gl-mn_${accum}.pp"
+ cat $list_of_files > ${datadir}/${accum_period}_hour/${save_current_date}_gl-mn_${accum}.pp
+
+ if [ $? -ne 0 ]; then
+ echo "ERROR: cat command failed"
+ echo "Files to concatenate:"
+ for f in $list_of_files; do
+ if [ -f "$f" ]; then
+ echo " EXISTS: $f ($(stat -c%s $f) bytes)"
+ else
+ echo " MISSING: $f"
+ fi
+ done
+ exit 1
+ fi
+
+ # Move to next time step
+ echo current_date=${current_date}
+ current_date=$(isodatetime -u $current_date --parse-format=%Y%m%d%H%M --offset PT${accum_period}H --print-format=%Y%m%d%H%M)
+ echo "Next time step: ${current_date}"
+
+done
+
+
+############################## FOR TRIALS #########################################
+# trial_name1=$2
+# trial_name2=$3
+# shortened_trial_name1=$(echo ${trial_name1} | tr -d '-')
+# shortened_trial_name2=$(echo ${trial_name2} | tr -d '-')
+# moo select query moose:/devfc/${trial_name1}/field.pp/ ${datadir}/${fcst_date}_${shortened_trial_name1}.pp
+# moo select query moose:/devfc/${trial_name2}/field.pp/ ${datadir}/${fcst_date}_${shortened_trial_name2}.pp
diff --git a/app/prep_prev_fcsts/bin/prep_get_GM_data.sh b/app/prep_prev_fcsts/bin/prep_get_GM_data.sh
new file mode 100755
index 0000000..943c94f
--- /dev/null
+++ b/app/prep_prev_fcsts/bin/prep_get_GM_data.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/bash -l
+
+input_date=$1
+datadir=$2
+max_lead=$3
+accum_period=$4
+
+##create fcst_date based on max_lead##
+fcst_date=$(isodatetime -u $input_date --offset -PT${max_lead}H --print-format=%Y%m%dT%H%MZ)
+
+year=${fcst_date:0:4}
+date=${fcst_date:0:8}
+hour=${fcst_date:9:2}
+
+## Define times for global update analysis - only 000 and 006 available ##
+analysis_runs=("000" "006")
+
+### Get analysis leads ###
+leads=$accum_period
+
+#################### UPDATE ANALYSES ##########################
+
+mass-pull () {
+ local analysis=$1
+ touch query
+ cat >query <query < ${datadir}/${accum_period}_hour/${fcst_date}_gl-mn_${accum}.pp"
+cat $list_of_files > ${datadir}/${accum_period}_hour/${fcst_date}_gl-mn_${accum}.pp
+
+
+############################## FOR TRIALS #########################################
+# trial_name1=$2
+# trial_name2=$3
+# shortened_trial_name1=$(echo ${trial_name1} | tr -d '-')
+# shortened_trial_name2=$(echo ${trial_name2} | tr -d '-')
+# moo select query moose:/devfc/${trial_name1}/field.pp/ ${datadir}/${fcst_date}_${shortened_trial_name1}.pp
+# moo select query moose:/devfc/${trial_name2}/field.pp/ ${datadir}/${fcst_date}_${shortened_trial_name2}.pp
diff --git a/app/prep_prev_fcsts/rose-app.conf b/app/prep_prev_fcsts/rose-app.conf
new file mode 100644
index 0000000..a3865ae
--- /dev/null
+++ b/app/prep_prev_fcsts/rose-app.conf
@@ -0,0 +1,2 @@
+[command]
+default=set -x; get_GM_data.sh $CYLC_TASK_CYCLE_POINT $DATADIR $MAX_LEAD $ACCUM_PERIOD
\ No newline at end of file
diff --git a/app/process_analysis/bin/process_analysis.py b/app/process_analysis/bin/process_analysis.py
index db95b86..01f9a52 100755
--- a/app/process_analysis/bin/process_analysis.py
+++ b/app/process_analysis/bin/process_analysis.py
@@ -4,6 +4,7 @@
import logging
import argparse
from datetime import datetime, timedelta
+iris.FUTURE.save_split_attrs = True
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
@@ -14,6 +15,8 @@ def parse_args():
parser.add_argument("--datetime", required=True)
parser.add_argument("--datadir", required=True)
parser.add_argument("--outdir", required=True)
+ parser.add_argument("--accum_period", type=int, required=True)
+ parser.add_argument("--prep_hours", type=int)
args = parser.parse_args()
return args
@@ -46,69 +49,101 @@ def main():
dt = args.datetime
datadir = args.datadir
output_dir = args.outdir
+ accum_period = args.accum_period
+ prep = args.prep_hours
LOGGER.info(f" DATETIME: {dt}")
LOGGER.info(f" DATADIR: {datadir}")
LOGGER.info(f" OUTDIR: {output_dir}")
-
- for accum in [6, 24]:
- T0 = f"{datadir}/{accum}_hour/{dt}_gl-mn_T000.pp"
- Tn = f"{datadir}/{accum}_hour/{dt}_gl-mn_T{accum:03d}.pp"
-
- # CONVECTIVE ANALYSIS
- conv_cube_t0 = create_total_cube(T0, "convective")
- conv_cube_tn = create_total_cube(Tn, "convective")
- conv_t0_time_slices = iris.cube.CubeList(conv_cube_t0.slices_over(["time"]))
- conv_tn_time_slices = iris.cube.CubeList(conv_cube_tn.slices_over(["time"]))
-
- assert(len(conv_t0_time_slices) == 1)
- conv_t0_analysis = conv_t0_time_slices[0]
- conv_tn_analysis = conv_tn_time_slices[-1]
-
- check_bounds(conv_t0_analysis, 3.0)
- check_bounds(conv_tn_analysis, float(accum) + 3.0)
- LOGGER.info(f"Bounds are correct, {accum}hr difference")
-
- conv_analysis_cube = conv_tn_analysis - conv_t0_analysis
- conv_analysis_cube.rename(f"(t+{accum})-(t+0) conv analysis")
- LOGGER.info(f"conv analysis cube: {conv_analysis_cube}")
-
- # LARGE SCALE ANALYSIS
- lsr_cube_t0 = create_total_cube(T0, "large_scale")
- lsr_cube_tn = create_total_cube(Tn, "large_scale")
- lsr_t0_time_slices = iris.cube.CubeList(lsr_cube_t0.slices_over(["time"]))
- lsr_tn_time_slices = iris.cube.CubeList(lsr_cube_tn.slices_over(["time"]))
-
- assert(len(lsr_t0_time_slices) == 1)
- lsr_t0_analysis = lsr_t0_time_slices[0]
- lsr_tn_analysis = lsr_tn_time_slices[-1]
-
- check_bounds(lsr_t0_analysis, 3.0)
- check_bounds(lsr_tn_analysis, float(accum) + 3.0)
- LOGGER.info(f"Bounds are correct, {accum}hr difference")
-
- lsr_analysis_cube = lsr_tn_analysis - lsr_t0_analysis
- lsr_analysis_cube.rename(f"(t+{accum})-(t+0) large scale analysis")
- LOGGER.info(f"large scale analysis cube: {lsr_analysis_cube}")
-
- # analysis VT will be DT+accum
- dt_object = datetime.strptime(dt, "%Y%m%dT%H%MZ")
- vt_object = dt_object + timedelta(hours=accum)
- VT = vt_object.strftime("%Y%m%dT%H%MZ")
-
- # Save paths
- conv_analysis_path_to_save = f"{output_dir}/{dt}_VT{VT}_conv_analysis.nc"
- lsr_analysis_path_to_save = f"{output_dir}/{dt}_VT{VT}_lsr_analysis.nc"
-
- # Total analysis
- total_analysis = lsr_analysis_cube.copy()
- total_analysis.long_name = "Total_Precip_Accumulation"
- total_data = lsr_analysis_cube.data + conv_analysis_cube.data
- total_analysis.data = total_data
-
- total_analysis.attributes['valid_time'] = VT
- total_path_to_save = f"{output_dir}/{dt}_VT{VT}_analysis.nc"
-
- iris.save(total_analysis, total_path_to_save)
+ LOGGER.info(f" ACCUM_PERIOD: {accum_period}")
+ LOGGER.info(f" PREP_HOURS: {prep}")
+
+ # Determine which datetimes to process
+ if prep and prep > 0:
+ # Generate list of datetimes from (dt - prep_hours) to dt at intervals
+ end_dt = datetime.strptime(dt, "%Y%m%dT%H%MZ")
+ start_dt = end_dt - timedelta(hours=prep)
+ datetimes_to_process = []
+ current_dt = start_dt
+ while current_dt <= end_dt:
+ datetimes_to_process.append(current_dt.strftime("%Y%m%dT%H%MZ"))
+ current_dt += timedelta(hours=accum_period)
+ LOGGER.info(f"Processing datetimes: {datetimes_to_process}")
+ else:
+ # Just process the single datetime
+ print(f"the format of dt is now {dt}")
+        dt = datetime.strptime(dt, "%Y%m%dT%H%MZ")
+ print(f"the format of dt is now {dt}")
+ dt = dt.strftime("%Y%m%dT%H%MZ")
+ print(f"the format of dt is now {dt}")
+ datetimes_to_process = [dt]
+
+ for process_dt in datetimes_to_process:
+ LOGGER.info(f"Processing datetime: {process_dt}")
+ print(f"Processing datetime: {process_dt} for accumulation period: {accum_period} hours")
+
+
+ LOGGER.info(f"Processing datetime: {process_dt}")
+ print(f"Processing datetime: {process_dt} for accumulation period: {accum_period} hours")
+
+ for accum in [accum_period]:
+ T0 = f"{datadir}/{accum}_hour/{process_dt}_gl-mn_T000.pp"
+ Tn = f"{datadir}/{accum}_hour/{process_dt}_gl-mn_T{accum:03d}.pp"
+
+ # CONVECTIVE ANALYSIS
+ conv_cube_t0 = create_total_cube(T0, "convective")
+ conv_cube_tn = create_total_cube(Tn, "convective")
+ conv_t0_time_slices = iris.cube.CubeList(conv_cube_t0.slices_over(["time"]))
+ conv_tn_time_slices = iris.cube.CubeList(conv_cube_tn.slices_over(["time"]))
+
+ assert(len(conv_t0_time_slices) == 1)
+ conv_t0_analysis = conv_t0_time_slices[0]
+ conv_tn_analysis = conv_tn_time_slices[-1]
+
+ check_bounds(conv_t0_analysis, 3.0)
+ check_bounds(conv_tn_analysis, float(accum) + 3.0)
+ LOGGER.info(f"Bounds are correct, {accum}hr difference")
+
+ conv_analysis_cube = conv_tn_analysis - conv_t0_analysis
+ conv_analysis_cube.rename(f"(t+{accum})-(t+0) conv analysis")
+ LOGGER.info(f"conv analysis cube: {conv_analysis_cube}")
+
+ # LARGE SCALE ANALYSIS
+ lsr_cube_t0 = create_total_cube(T0, "large_scale")
+ lsr_cube_tn = create_total_cube(Tn, "large_scale")
+ lsr_t0_time_slices = iris.cube.CubeList(lsr_cube_t0.slices_over(["time"]))
+ lsr_tn_time_slices = iris.cube.CubeList(lsr_cube_tn.slices_over(["time"]))
+
+ assert(len(lsr_t0_time_slices) == 1)
+ lsr_t0_analysis = lsr_t0_time_slices[0]
+ lsr_tn_analysis = lsr_tn_time_slices[-1]
+
+ check_bounds(lsr_t0_analysis, 3.0)
+ check_bounds(lsr_tn_analysis, float(accum) + 3.0)
+ LOGGER.info(f"Bounds are correct, {accum}hr difference")
+
+ lsr_analysis_cube = lsr_tn_analysis - lsr_t0_analysis
+ lsr_analysis_cube.rename(f"(t+{accum})-(t+0) large scale analysis")
+ LOGGER.info(f"large scale analysis cube: {lsr_analysis_cube}")
+
+ # analysis VT will be DT+accum
+ dt_object = datetime.strptime(process_dt, "%Y%m%dT%H%MZ")
+ vt_object = dt_object + timedelta(hours=accum)
+ VT = vt_object.strftime("%Y%m%dT%H%MZ")
+
+ # Save paths
+ # conv_analysis_path_to_save = f"{output_dir}/{process_dt}_VT{VT}_conv_analysis.nc"
+ # lsr_analysis_path_to_save = f"{output_dir}/{process_dt}_VT{VT}_lsr_analysis.nc"
+
+ # Total analysis
+ total_analysis = lsr_analysis_cube.copy()
+ total_analysis.long_name = "precipitation_amount"
+ total_data = lsr_analysis_cube.data + conv_analysis_cube.data
+ total_analysis.data = total_data
+
+ total_analysis.attributes['valid_time'] = VT
+ total_path_to_save = f"{output_dir}/VT{VT}_analysis_{accum}hr.nc"
+
+ iris.save(total_analysis, total_path_to_save)
if __name__ == "__main__":
diff --git a/app/process_analysis/rose-app.conf b/app/process_analysis/rose-app.conf
index 5403257..5f2ab96 100755
--- a/app/process_analysis/rose-app.conf
+++ b/app/process_analysis/rose-app.conf
@@ -1,2 +1,2 @@
[command]
-default=set -x; module load $SCITOOLS; process_analysis.py --datetime $CYLC_TASK_CYCLE_POINT --datadir $DATADIR --outdir $OUTPUT_DIR
\ No newline at end of file
+default=set -x; module load $SCITOOLS; process_analysis.py --datetime $CYLC_TASK_CYCLE_POINT --datadir $DATADIR --outdir $OUTPUT_DIR --accum_period $ACCUM_PERIOD ${PREP_HOURS:+--prep_hours $PREP_HOURS}
\ No newline at end of file
diff --git a/app/process_periods/bin/process_accumulations.py b/app/process_periods/bin/process_accumulations.py
index 0de67ca..c33935b 100755
--- a/app/process_periods/bin/process_accumulations.py
+++ b/app/process_periods/bin/process_accumulations.py
@@ -6,6 +6,7 @@
from oemplotlib.cube_utils import running_accum_to_period, fix_running_cube_time, separate_realization_time
import argparse
from datetime import datetime, timedelta
+#iris.FUTURE.save_split_attrs = True
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
@@ -16,8 +17,10 @@ def parse_args():
parser.add_argument("--datetime", required=True)
parser.add_argument("--datadir", required=True)
parser.add_argument("--outdir", required=True)
+ parser.add_argument("--prep_hours", type=int)
parser.add_argument("--trial1")
parser.add_argument("--trial2")
+ parser.add_argument("--accum_period", type=int, required=True)
args = parser.parse_args()
return args
@@ -134,6 +137,7 @@ def _add_cell_methods(cube):
"plot_rain_amnt: Error accumulating rain for %s period, skipping",
period_hrs,
)
+ raise
return accum_rain
@@ -237,6 +241,7 @@ def _loader(cubelist, stash_constraint):
"plot_rain_amnt: Error accumulating rain for %s period, skipping",
period_hrs,
)
+ raise
return accum_rain
@@ -335,16 +340,39 @@ def main():
dt = args.datetime
datadir = args.datadir
output_dir = args.outdir
+ prep = args.prep_hours
+ accum = args.accum_period
LOGGER.info(f" DATETIME: {dt}")
LOGGER.info(f" DATADIR: {datadir}")
LOGGER.info(f" OUTDIR: {output_dir}")
-
- accum_periods = [6, 24]
+ LOGGER.info(f" PREP_HOURS: {prep}")
+ LOGGER.info(f" ACCUM_PERIOD: {accum}")
+
+ #accum_periods = [accum]
+
+ ##for accum in accum_periods:
+ # Determine which datetimes to process
+ if prep and prep > 0:
+ # Generate list of datetimes from (dt - prep_hours) to dt at intervals
+ end_dt = datetime.strptime(dt, "%Y%m%dT%H%MZ")
+ start_dt = end_dt - timedelta(hours=(prep)) # start time is prep hours before the main datetime
+ datetimes_to_process = []
+ current_dt = start_dt
+ while current_dt <= end_dt:
+ datetimes_to_process.append(current_dt.strftime("%Y%m%dT%H%MZ"))
+ current_dt += timedelta(hours=accum) ## increment by the accumulation period; may need rework if looping over multiple accumulation periods
+ LOGGER.info(f"Processing datetimes: {datetimes_to_process}")
+ else:
+ # Just process the single datetime
+ datetimes_to_process = [dt]
- for accum in accum_periods:
+ for process_dt in datetimes_to_process:
+ LOGGER.info(f"Processing datetime: {process_dt}")
+
+ #for accum in accum_periods:
val = accum
- file = f"{datadir}/{accum}_hour/{dt}_gl-mn_{accum:03d}.pp"
+ file = f"{datadir}/{accum}_hour/{process_dt}_gl-mn_{accum:03d}.pp"
#print(f"This is file_to_read: {filepath}")
#cubes_to_read = iris.load(filepath)
@@ -359,28 +387,93 @@ def main():
lsr_accumulations = lsr_precip.get_cube_accumulations(accum)
conv_accumulations = conv_precip.get_cube_accumulations(accum)
- total_accumulations = lsr_accumulations.copy()
- total_accumulations.long_name = "Total_Precip_Accumulation"
-
- total_data = lsr_accumulations.data + conv_accumulations.data
- total_accumulations.data = total_data
+ total_accumulations = lsr_accumulations + conv_accumulations
+ total_accumulations.long_name = "precipitation_amount"
- init_time = datetime.strptime(dt, "%Y%m%dT%H%MZ") # adjust format as needed
+ init_time = datetime.strptime(process_dt, "%Y%m%dT%H%MZ") # adjust format as needed
lead_hours = accum # or use your lead time variable
valid_time = init_time + timedelta(hours=accum)
+ print(f"init_time: {init_time}, lead_hours: {lead_hours}, valid_time: {valid_time}")
total_accumulations.attributes['valid_time'] = valid_time.strftime("%Y%m%dT%H%MZ")
- total_path_to_save = f"{output_dir}/{dt}_{accum}hr_accums.nc"
- lsr_path_to_save = f"{output_dir}/{dt}_{accum}hr_lsr_accums.nc"
- conv_path_to_save = f"{output_dir}/{dt}_{accum}hr_conv_accums.nc"
- # iris.save(lsr_accumulations, lsr_path_to_save)
- # iris.save(conv_accumulations, conv_path_to_save)
- iris.save(total_accumulations, total_path_to_save)
- # print(f"LARGE SCALE PRECIP: {lsr_accumulations}")
- # print("*********************************************")
- # print(f"CONVECTIVE PRECIP: {conv_accumulations}")
+ # ADDED: Diagnostic logging to check time coordinates
+ time_coord = total_accumulations.coord('time')
+ if time_coord:
+ LOGGER.info(f"Time coordinate points: {time_coord.points}")
+ LOGGER.info(f"Time coordinate bounds: {time_coord.bounds if time_coord.has_bounds() else 'None'}")
+
+
+ total_accumulations.long_name = "precipitation_amount"
+
+ init_time = datetime.strptime(process_dt, "%Y%m%dT%H%MZ") # adjust format as needed
+ lead_hours = accum # or use your lead time variable
+ valid_time = init_time + timedelta(hours=accum)
+ print(f"init_time: {init_time}, lead_hours: {lead_hours}, valid_time: {valid_time}")
+
+ # REMOVED: Don't set valid_time as global attribute here
+ # total_accumulations.attributes['valid_time'] = valid_time.strftime("%Y%m%dT%H%MZ")
+
+ # ADDED: Diagnostic logging to check time coordinates
+ time_coord = total_accumulations.coord('time')
+ if time_coord:
+ LOGGER.info(f"Time coordinate points: {time_coord.points}")
+ LOGGER.info(f"Time coordinate bounds: {time_coord.bounds if time_coord.has_bounds() else 'None'}")
+ LOGGER.info(f"Time coordinate shape: {time_coord.shape}")
+
+ # NEW: Split by time and save each valid time separately (like analysis files)
+ if time_coord and time_coord.shape[0] > 1:
+ LOGGER.info(f"Splitting cube with {time_coord.shape[0]} time steps into separate files")
+
+ # Iterate over each time slice
+ for time_slice in total_accumulations.slices_over('time'):
+ # Extract the valid time for this slice
+ slice_time_coord = time_slice.coord('time')
+ slice_valid_time = slice_time_coord.units.num2date(slice_time_coord.points[0])
+
+ # Remove the time dimension to match analysis file structure
+ sliced_cube = iris.util.squeeze(time_slice)
+
+ # Set the valid_time attribute for this specific slice
+ sliced_cube.attributes['valid_time'] = slice_valid_time.strftime("%Y%m%dT%H%MZ")
+
+ # Generate filename with valid time prefix (like analysis files)
+ file_to_save = f"{output_dir}/VT{slice_valid_time.strftime('%Y%m%dT%H%MZ')}_I{process_dt}_{accum}hr_accums.nc"
+
+ LOGGER.info(f"Saving {file_to_save}")
+ LOGGER.info(f"Cube shape: {sliced_cube.shape} (should be 2D: lat, lon)")
+
+ iris.save(sliced_cube, file_to_save)
+
+ else:
+ # Single time step - remove time dimension
+ LOGGER.info("Single time step detected, removing time dimension")
+ squeezed_cube = iris.util.squeeze(total_accumulations)
+
+ # Set the valid_time attribute
+ squeezed_cube.attributes['valid_time'] = valid_time.strftime("%Y%m%dT%H%MZ")
+
+ # Generate filename with valid time prefix
+ file_to_save = f"{output_dir}/VT{valid_time.strftime('%Y%m%dT%H%MZ')}_I{process_dt}_{accum}hr_accums.nc"
+
+ LOGGER.info(f"Saving {file_to_save}")
+ LOGGER.info(f"Cube shape: {squeezed_cube.shape} (should be 2D: lat, lon)")
+
+ iris.save(squeezed_cube, file_to_save)
+
+ # REMOVED: Old single-file save
+ # total_path_to_save = f"{output_dir}/{process_dt}_{accum}hr_accums.nc"
+ # lsr_path_to_save = f"{output_dir}/{process_dt}_{accum}hr_lsr_accums.nc"
+ # conv_path_to_save = f"{output_dir}/{process_dt}_{accum}hr_conv_accums.nc"
+ # iris.save(total_accumulations, total_path_to_save)
+
print(f"TOTAL PRECIP: {total_accumulations}")
+
+ #total_path_to_save = f"{output_dir}/{process_dt}_{accum}hr_accums.nc"
+
+
+ #iris.save(total_accumulations, total_path_to_save)
+
# TRIAL OPTIONS
# trial_file1 = f"//PS47/PS47_thresholdplot_data/{cube_dt}_{trial_name1}.pp"
diff --git a/app/process_periods/rose-app.conf b/app/process_periods/rose-app.conf
index bfe6e54..fdc1c52 100755
--- a/app/process_periods/rose-app.conf
+++ b/app/process_periods/rose-app.conf
@@ -1,2 +1,2 @@
[command]
-default=set -x; module load $SCITOOLS; process_accumulations.py --datetime $CYLC_TASK_CYCLE_POINT --datadir $DATADIR --outdir $OUTPUT_DIR
\ No newline at end of file
+default=set -x; moxie-run process_accumulations.py --datetime $CYLC_TASK_CYCLE_POINT --datadir $DATADIR --outdir $OUTPUT_DIR --accum_period $ACCUM_PERIOD ${PREP_HOURS:+--prep_hours $PREP_HOURS}
\ No newline at end of file
diff --git a/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_analysis_valid.ltg b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_analysis_valid.ltg
new file mode 100644
index 0000000..89802c7
--- /dev/null
+++ b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_analysis_valid.ltg
@@ -0,0 +1,146 @@
+# SeriesAnalysis METplus Configuration
+
+# section heading for [config] variables - all items below this line and
+# before the next section heading correspond to the [config] section
+[config]
+
+# List of applications to run - only SeriesAnalysis for this case
+PROCESS_LIST = SeriesAnalysis
+
+# time looping - options are INIT, VALID, RETRO, and REALTIME
+# If set to INIT or RETRO:
+# INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set
+# If set to VALID or REALTIME:
+# VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set
+LOOP_BY = VALID
+
+# Format of VALID_BEG and VALID_END using % items
+# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc.
+# see www.strftime.org for more information
+# %Y%m%d%H expands to YYYYMMDDHH
+VALID_TIME_FMT = %Y%m%dT%H%MZ
+
+# Start time for METplus run - must match VALID_TIME_FMT
+VALID_BEG={ENV[CYLC_TASK_CYCLE_POINT]}
+
+# End time for METplus run - must match VALID_TIME_FMT
+VALID_END={ENV[CYLC_TASK_CYCLE_POINT]}
+
+SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD
+
+# Increment between METplus runs (in seconds if no units are specified)
+# Must be >= 60 seconds
+##VALID_INCREMENT = {ENV[ACCUM_PERIOD]}H
+
+SERIES_ANALYSIS_BLOCK_SIZE = 5160960
+
+# List of forecast leads to process for each run time (init or valid)
+# In hours if units are not specified
+# If unset, defaults to 0 (don't loop through forecast leads)
+LEAD_SEQ=begin_end_incr({ENV[LEAD_SEQ]})
+
+## For potential running on run once per init or valid
+#LEAD_SEQ_1 = begin_end_incr({ENV[LEAD_SEQ1]})
+#LEAD_SEQ_1_LABEL = {ENV[LEAD_SEQ_1_LABEL]}
+#LEAD_SEQ_2 = begin_end_incr({ENV[LEAD_SEQ2]})
+#LEAD_SEQ_2_LABEL = {ENV[LEAD_SEQ_2_LABEL]}
+
+# list of strings to loop over to run SeriesAnalysis multiple times for a
+# given run time. Each item can be referenced using the filename template
+# syntax {custom?fmt=%s}
+# Not used in this example
+SERIES_ANALYSIS_CUSTOM_LOOP_LIST =
+
+# Order of loops to process data - Options are times, processes
+# Not relevant if only one item is in the PROCESS_LIST
+# times = run all wrappers in the PROCESS_LIST for a single run time, then
+# increment the run time and run all wrappers again until all times have
+# been evaluated.
+# processes = run the first wrapper in the PROCESS_LIST for all times
+# specified, then repeat for the next item in the PROCESS_LIST until all
+# wrappers have been run
+LOOP_ORDER = processes
+
+# Verbosity of MET output - overrides LOG_VERBOSITY for SeriesAnalysis only
+LOG_SERIES_ANALYSIS_VERBOSITY = 2
+
+# set to True to add the -paired flag to the SeriesAnalysis command
+SERIES_ANALYSIS_IS_PAIRED = FALSE
+
+# Path to existing Series-Analysis output file to aggregate with
+# Aggregates partial sums (SL1L2, SAL1L2) and contingency table counts (CTC, MCTC, PCT)
+# Leave commented out if not using aggregation
+#SERIES_ANALYSIS_AGGR_FILE = {ENV[MET_OUTPUT_DIR]}/series_analysis_aggr_file_analysis.nc
+
+# Location of MET config file to pass to SeriesAnalysis
+# References CONFIG_DIR from the [dir] section
+#SERIES_ANALYSIS_CONFIG_FILE =
+
+# list of statistics to generate with SeriesAnalysis
+
+#SERIES_ANALYSIS_CAT_THRESH = [>0.1, >1, >4, >8, >16, >30, >50, >75, >100]
+#SERIES_ANALYSIS_STAT_LIST = [TOTAL, FBAR, OBAR, RMSE, MBIAS, MAE, ME, BCMSE]
+#SERIES_ANALYSIS_OUTPUT_STATS_CTS = [FBIAS, GSS];
+#SERIES_ANALYSIS_OUTPUT_STATS_CTC = [TOTAL, FY_OY, FY_ON, FN_OY, FN_ON];
+SERIES_ANALYSIS_OUTPUT_STATS_SL1L2 = [TOTAL, FBAR, OBAR, MAE]
+
+# Specify area masks for verification areas
+##SERIES_ANALYSIS_MASK_POLY = {ENV[AREA_MASKS]}
+##SERIES_ANALYSIS_MASK_GRID = FULL
+
+# grid to remap data. Value is set as the 'to_grid' variable in the 'regrid' dictionary
+# See MET User's Guide for more information
+
+# Name to identify model (forecast) data in output
+MODEL = MO_GLOBAL
+
+# Name to identify observation data in output
+OBTYPE = ANALYSIS
+
+# List of variables to compare in SeriesAnalysis - FCST_VAR1 variables correspond
+# to OBS_VAR1 variables
+# Note [FCST/OBS/BOTH]_SERIES_ANALYSIS_VAR_NAME can be used instead if different evaluations
+# are needed for different tools
+
+#### FIELDS ####
+
+FCST_VAR1_NAME = precipitation_amount
+#FCST_VAR1_LEVELS = "({valid?fmt=%Y%m%d%H},*,*)"
+FCST_VAR1_LEVELS = "(*,*)"
+OBS_VAR1_NAME = precipitation_amount
+OBS_VAR1_LEVELS = "(*,*)"
+
+# End of [config] section and start of [dir] section
+[dir]
+
+# location of configuration files used by MET applications
+CONFIG_DIR={PARM_BASE}/met_config
+
+# directory containing forecast input to SeriesAnalysis
+FCST_SERIES_ANALYSIS_INPUT_DIR = {ENV[ACCUM_DATA_DIR]}
+OBS_SERIES_ANALYSIS_INPUT_DIR = {ENV[ACCUM_DATA_DIR]}
+
+OUTPUT_BASE={ENV[OUTPUT_BASE]}
+MET_INSTALL_DIR=/data/users/cfver/METBuild/MET12.1.0
+
+# directory to write output from SeriesAnalysis
+SERIES_ANALYSIS_OUTPUT_DIR = {ENV[MET_OUTPUT_DIR]}
+
+SERIES_ANALYSIS_REGRID_WIDTH = 2
+SERIES_ANALYSIS_REGRID_METHOD = BUDGET
+SERIES_ANALYSIS_REGRID_VLD_THRESH = 0
+SERIES_ANALYSIS_REGRID_TO_GRID = FCST
+
+# End of [dir] section and start of [filename_templates] section
+[filename_templates]
+
+# Template to look for forecast input to SeriesAnalysis relative to FCST_SERIES_ANALYSIS_INPUT_DIR
+# The following template uses begin_end_incr() syntax to generate the following list:
+
+##FCST_SERIES_ANALYSIS_INPUT_TEMPLATE ={init?fmt=%Y%m%dT%2H}00Z_{ENV[ACCUM_PERIOD]}hr_accums.nc
+FCST_SERIES_ANALYSIS_INPUT_TEMPLATE = VT{valid?fmt=%Y%m%dT%2H}00Z_I{init?fmt=%Y%m%dT%2H}00Z_{ENV[ACCUM_PERIOD]}hr_accums.nc
+
+OBS_SERIES_ANALYSIS_INPUT_TEMPLATE = VT{valid?fmt=%Y%m%dT%2H}00Z_analysis_{ENV[ACCUM_PERIOD]}hr.nc
+
+# Optional subdirectories relative to SERIES_ANALYSIS_OUTPUT_DIR to write output from SeriesAnalysis
+SERIES_ANALYSIS_OUTPUT_TEMPLATE = {ENV[CYLC_TASK_CYCLE_POINT]}/SERIES_ANALYSIS_OUTPUT_AGAINST_ANALYSIS_{ENV[ACCUM_PERIOD]}hr_V{ENV[CYLC_TASK_CYCLE_POINT]}_L{lead?fmt=%HH}.nc
diff --git a/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm.ltg b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm.ltg
index eeeda77..a95c7b0 100644
--- a/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm.ltg
+++ b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm.ltg
@@ -26,7 +26,7 @@ INIT_BEG={ENV[START_CYCLE_POINT]}
# End time for METplus run - must match INIT_TIME_FMT
INIT_END={ENV[FINAL_CYCLE_POINT]}
-SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD
+SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_FOR_EACH
# Increment between METplus runs (in seconds if no units are specified)
# Must be >= 60 seconds
diff --git a/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm_valid.ltg b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm_valid.ltg
new file mode 100644
index 0000000..51fd132
--- /dev/null
+++ b/app/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm_valid.ltg
@@ -0,0 +1,146 @@
+# SeriesAnalysis METplus Configuration
+
+# section heading for [config] variables - all items below this line and
+# before the next section heading correspond to the [config] section
+[config]
+
+# List of applications to run - only SeriesAnalysis for this case
+PROCESS_LIST = SeriesAnalysis
+
+# time looping - options are INIT, VALID, RETRO, and REALTIME
+# If set to INIT or RETRO:
+# INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set
+# If set to VALID or REALTIME:
+# VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set
+LOOP_BY = VALID
+
+# Format of VALID_BEG and VALID_END using % items
+# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc.
+# see www.strftime.org for more information
+# %Y%m%d%H expands to YYYYMMDDHH
+VALID_TIME_FMT = %Y%m%dT%H%MZ
+
+# Start time for METplus run - must match VALID_TIME_FMT
+VALID_BEG={ENV[CYLC_TASK_CYCLE_POINT]}
+
+# End time for METplus run - must match VALID_TIME_FMT
+VALID_END={ENV[CYLC_TASK_CYCLE_POINT]}
+
+SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD
+
+# Increment between METplus runs (in seconds if no units are specified)
+# Must be >= 60 seconds
+##VALID_INCREMENT = {ENV[ACCUM_PERIOD]}H
+
+SERIES_ANALYSIS_BLOCK_SIZE = 5160960
+
+# List of forecast leads to process for each run time (init or valid)
+# In hours if units are not specified
+# If unset, defaults to 0 (don't loop through forecast leads)
+LEAD_SEQ=begin_end_incr({ENV[LEAD_SEQ]})
+
+## For potential running on run once per init or valid
+#LEAD_SEQ_1 = begin_end_incr({ENV[LEAD_SEQ1]})
+#LEAD_SEQ_1_LABEL = {ENV[LEAD_SEQ_1_LABEL]}
+#LEAD_SEQ_2 = begin_end_incr({ENV[LEAD_SEQ2]})
+#LEAD_SEQ_2_LABEL = {ENV[LEAD_SEQ_2_LABEL]}
+
+# list of strings to loop over to run SeriesAnalysis multiple times for a
+# given run time. Each item can be referenced using the filename template
+# syntax {custom?fmt=%s}
+# Not used in this example
+SERIES_ANALYSIS_CUSTOM_LOOP_LIST =
+
+# Order of loops to process data - Options are times, processes
+# Not relevant if only one item is in the PROCESS_LIST
+# times = run all wrappers in the PROCESS_LIST for a single run time, then
+# increment the run time and run all wrappers again until all times have
+# been evaluated.
+# processes = run the first wrapper in the PROCESS_LIST for all times
+# specified, then repeat for the next item in the PROCESS_LIST until all
+# wrappers have been run
+LOOP_ORDER = processes
+
+# Verbosity of MET output - overrides LOG_VERBOSITY for SeriesAnalysis only
+LOG_SERIES_ANALYSIS_VERBOSITY = 3
+
+# set to True to add the -paired flag to the SeriesAnalysis command
+SERIES_ANALYSIS_IS_PAIRED = FALSE
+
+# Location of MET config file to pass to SeriesAnalysis
+# References CONFIG_DIR from the [dir] section
+#SERIES_ANALYSIS_CONFIG_FILE =
+
+# Path to existing Series-Analysis output file to aggregate with
+# Aggregates partial sums (SL1L2, SAL1L2) and contingency table counts (CTC, MCTC, PCT)
+# Leave commented out if not using aggregation
+#SERIES_ANALYSIS_AGGR_FILE = {ENV[MET_OUTPUT_DIR]}/series_analysis_aggr_file_gpm.nc
+
+# list of statistics to generate with SeriesAnalysis
+
+#SERIES_ANALYSIS_CAT_THRESH = [>0.1, >1, >4, >8, >16, >30, >50, >75, >100]
+#SERIES_ANALYSIS_STAT_LIST = [TOTAL, FBAR, OBAR, RMSE, MBIAS, MAE, ME, ESTDEV, FSTDEV, OSTDEV, BCMSE]
+#SERIES_ANALYSIS_OUTPUT_STATS_CTS = [FBIAS, GSS];
+#SERIES_ANALYSIS_OUTPUT_STATS_CTC = [TOTAL, FY_OY, FY_ON, FN_OY, FN_ON];
+SERIES_ANALYSIS_OUTPUT_STATS_SL1L2 = [TOTAL, FBAR, OBAR, MAE]
+
+# Specify area masks for verification areas
+##SERIES_ANALYSIS_MASK_POLY = {ENV[AREA_MASKS]}
+##SERIES_ANALYSIS_MASK_GRID = FULL
+
+# grid to remap data. Value is set as the 'to_grid' variable in the 'regrid' dictionary
+# See MET User's Guide for more information
+
+# Name to identify model (forecast) data in output
+MODEL = MO_GLOBAL
+
+# Name to identify observation data in output
+OBTYPE = GPM
+
+# List of variables to compare in SeriesAnalysis - FCST_VAR1 variables correspond
+# to OBS_VAR1 variables
+# Note [FCST/OBS/BOTH]_SERIES_ANALYSIS_VAR_NAME can be used instead if different evaluations
+# are needed for different tools
+
+#### FIELDS ####
+
+FCST_VAR1_NAME = precipitation_amount
+##FCST_VAR1_LEVELS = "({valid?fmt=%Y%m%d%H},*,*)"
+FCST_VAR1_LEVELS = "(*,*)"
+
+OBS_VAR1_NAME = precipitation_amount
+OBS_VAR1_LEVELS = "(*,*)"
+
+# End of [config] section and start of [dir] section
+[dir]
+
+# location of configuration files used by MET applications
+CONFIG_DIR={PARM_BASE}/met_config
+
+# directory containing forecast input to SeriesAnalysis
+FCST_SERIES_ANALYSIS_INPUT_DIR = {ENV[ACCUM_DATA_DIR]}
+OBS_SERIES_ANALYSIS_INPUT_DIR = {ENV[ACCUM_DATA_DIR]}
+
+OUTPUT_BASE={ENV[OUTPUT_BASE]}
+MET_INSTALL_DIR=/data/users/cfver/METBuild/MET12.1.0
+
+# directory to write output from SeriesAnalysis
+SERIES_ANALYSIS_OUTPUT_DIR = {ENV[MET_OUTPUT_DIR]}
+
+SERIES_ANALYSIS_REGRID_WIDTH = 2
+SERIES_ANALYSIS_REGRID_METHOD = BUDGET
+SERIES_ANALYSIS_REGRID_VLD_THRESH = 0
+SERIES_ANALYSIS_REGRID_TO_GRID = FCST
+
+# End of [dir] section and start of [filename_templates] section
+[filename_templates]
+
+# Template to look for forecast input to SeriesAnalysis relative to FCST_SERIES_ANALYSIS_INPUT_DIR
+# The following template uses begin_end_incr() syntax to generate the following list:
+
+FCST_SERIES_ANALYSIS_INPUT_TEMPLATE = VT{valid?fmt=%Y%m%dT%2H}00Z_I{init?fmt=%Y%m%dT%2H}00Z_{ENV[ACCUM_PERIOD]}hr_accums.nc
+
+OBS_SERIES_ANALYSIS_INPUT_TEMPLATE = {ENV[ACCUM_PERIOD]}_hour_gpm/gpm_*_{valid?fmt=%Y%m%d%H}.nc
+
+# Optional subdirectories relative to SERIES_ANALYSIS_OUTPUT_DIR to write output from SeriesAnalysis
+SERIES_ANALYSIS_OUTPUT_TEMPLATE = {ENV[CYLC_TASK_CYCLE_POINT]}/SERIES_ANALYSIS_OUTPUT_AGAINST_GPM_{ENV[ACCUM_PERIOD]}hr_V{ENV[CYLC_TASK_CYCLE_POINT]}_L{lead?fmt=%HH}.nc
diff --git a/app/run_series_analysis/bin/run_code.sh b/app/run_series_analysis/bin/run_code.sh
index 0e4b8ae..a84742f 100755
--- a/app/run_series_analysis/bin/run_code.sh
+++ b/app/run_series_analysis/bin/run_code.sh
@@ -7,6 +7,6 @@ set -x; module load scitools
module use /data/users/cfver/METBuild/modules
module load MET_Stable
-run_metplus.py ${CYLC_SUITE_DEF_PATH}/app/${ROSE_TASK_APP}/run_series_analysis/bin/SeriesAnalysisMETplus_precip_gpm.ltg
+run_metplus.py ${CYLC_SUITE_DEF_PATH}/app/${ROSE_TASK_APP}/bin/SeriesAnalysisMETplus_precip_${TRUTH}_valid.ltg
##run_metplus.py SeriesAnalysisMETplus_precip_gpm.ltg
\ No newline at end of file
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..f804c9c
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..6b0ba4e
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..ee1c6d2
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..ffb652c
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/index.html b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/index.html
new file mode 100644
index 0000000..f9990d8
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/index.html
@@ -0,0 +1,367 @@
+
+
+ VerPy - 20250128T0000Z_L48_gt1 - 03 February 2026 14:31:06
+
+
+
+
+
+
+
+
+
Ran OK - 03 February 2026 14:31:06
+ (42.252 seconds)
+
+
+
+
+
Image name
+
Images
+
+
+
+ View original plot
+
+
+
+ Small
+
+
+ Medium
+
+
+ Large
+
+
+
↑ PgUp / PgDown ↓
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.rst b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.rst
new file mode 100644
index 0000000..404ca4f
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.rst
@@ -0,0 +1,70 @@
+Version
+----------------------------------------
+
+============================== ========================================
+Module Version/Path
+============================== ========================================
+VerPy Path: /home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+VerPy Version: 7.1
+NumPy Version: 1.26.4
+Matplotlib Version: 3.8.4
+Scitools Version: environments/VerPyEnv
+============================== ========================================
+
+Options
+----------------------------------------
+
+
+==================== ==================================================
+ Attribute Value
+==================== ==================================================
+ verbosity 10
+ jobid 20250128T0000Z_L48_gt1
+ plottheme
+ metadata example_viewer
+ output errormap
+ mapopts global
+ type ctc
+ system MET
+ source /data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L48.nc
+ params ['6hr Precipitation Accumulation (mm)']
+ stats ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ thresh ['>1']
+==================== ==================================================
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['4800.0']
+ dates: ['20250128 00:00']
+
+Data has been modified.
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['4800.0']
+ dates: ['20250128 00:00']
+
+
+VerPy.errormap
+----------------------------------------
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.ssi b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.ssi
new file mode 100644
index 0000000..19c6900
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/output.ssi
@@ -0,0 +1,111 @@
+
+
Version
+
+
+
+
+
+
+Module
+Version/Path
+
+
+
+VerPy Path:
+/home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+
+VerPy Version:
+7.1
+
+NumPy Version:
+1.26.4
+
+Matplotlib Version:
+3.8.4
+
+Scitools Version:
+environments/VerPyEnv
+
+
+
+
+
+
Options
+
+
+
+
+
+
+Attribute
+Value
+
+
+
+verbosity
+10
+
+jobid
+20250128T0000Z_L48_gt1
+
+plottheme
+<VerPy plot theme: "Plotheme: default">
+
+metadata
+example_viewer
+
+output
+errormap
+
+mapopts
+global
+
+type
+ctc
+
+system
+MET
+
+source
+/data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L48.nc
+
+params
+['6hr Precipitation Accumulation (mm)']
+
+stats
+['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+
+thresh
+['>1']
+
+
+
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['4800.0']
+dates: ['20250128 00:00']
+
+
Data has been modified.
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['4800.0']
+dates: ['20250128 00:00']
+
+
+
+
VerPy.errormap
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+48, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/subjobinfo.txt b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/subjobinfo.txt
new file mode 100644
index 0000000..493408d
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/subjobinfo.txt
@@ -0,0 +1 @@
+{'runlength': 42.25182604789734, 'rundatetime': '03 February 2026 14:31:06', 'processid': 271820, 'status': 'Ran OK'}
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..1f1e906
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..3052474
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..7ad4b5a
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..cc36462
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L48_gt1/warnings.txt b/app/run_verpy_plotting/20250128T0000Z_L48_gt1/warnings.txt
new file mode 100644
index 0000000..e69de29
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..f175ece
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..dbb757d
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..93300ef
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..44902b7
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/index.html b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/index.html
new file mode 100644
index 0000000..1779a12
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/index.html
@@ -0,0 +1,367 @@
+
+
+ VerPy - 20250128T0000Z_L72_gt1 - 03 February 2026 14:31:42
+
+
+
+
+
+
+
+
+
Ran OK - 03 February 2026 14:31:42
+ (33.946 seconds)
+
+
+
+
+
Image name
+
Images
+
+
+
+ View original plot
+
+
+
+ Small
+
+
+ Medium
+
+
+ Large
+
+
+
↑ PgUp / PgDown ↓
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.rst b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.rst
new file mode 100644
index 0000000..959c2af
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.rst
@@ -0,0 +1,70 @@
+Version
+----------------------------------------
+
+============================== ========================================
+Module Version/Path
+============================== ========================================
+VerPy Path: /home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+VerPy Version: 7.1
+NumPy Version: 1.26.4
+Matplotlib Version: 3.8.4
+Scitools Version: environments/VerPyEnv
+============================== ========================================
+
+Options
+----------------------------------------
+
+
+==================== ==================================================
+ Attribute Value
+==================== ==================================================
+ verbosity 10
+ jobid 20250128T0000Z_L72_gt1
+ plottheme
+ metadata example_viewer
+ output errormap
+ mapopts global
+ type ctc
+ system MET
+ source /data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L72.nc
+ params ['6hr Precipitation Accumulation (mm)']
+ stats ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ thresh ['>1']
+==================== ==================================================
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['7200.0']
+ dates: ['20250128 00:00']
+
+Data has been modified.
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['7200.0']
+ dates: ['20250128 00:00']
+
+
+VerPy.errormap
+----------------------------------------
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.ssi b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.ssi
new file mode 100644
index 0000000..8e738c3
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/output.ssi
@@ -0,0 +1,111 @@
+
+
Version
+
+
+
+
+
+
+Module
+Version/Path
+
+
+
+VerPy Path:
+/home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+
+VerPy Version:
+7.1
+
+NumPy Version:
+1.26.4
+
+Matplotlib Version:
+3.8.4
+
+Scitools Version:
+environments/VerPyEnv
+
+
+
+
+
+
Options
+
+
+
+
+
+
+Attribute
+Value
+
+
+
+verbosity
+10
+
+jobid
+20250128T0000Z_L72_gt1
+
+plottheme
+<VerPy plot theme: "Plotheme: default">
+
+metadata
+example_viewer
+
+output
+errormap
+
+mapopts
+global
+
+type
+ctc
+
+system
+MET
+
+source
+/data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L72.nc
+
+params
+['6hr Precipitation Accumulation (mm)']
+
+stats
+['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+
+thresh
+['>1']
+
+
+
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['7200.0']
+dates: ['20250128 00:00']
+
+
Data has been modified.
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['7200.0']
+dates: ['20250128 00:00']
+
+
+
+
VerPy.errormap
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/subjobinfo.txt b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/subjobinfo.txt
new file mode 100644
index 0000000..6e98fe2
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/subjobinfo.txt
@@ -0,0 +1 @@
+{'runlength': 33.94594740867615, 'rundatetime': '03 February 2026 14:31:42', 'processid': 272140, 'status': 'Ran OK'}
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..c870073
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..6405a02
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..c26ab7e
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..42c5eff
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_L72_gt1/warnings.txt b/app/run_verpy_plotting/20250128T0000Z_L72_gt1/warnings.txt
new file mode 100644
index 0000000..e69de29
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..f175ece
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..dbb757d
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..93300ef
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..44902b7
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/index.html b/app/run_verpy_plotting/20250128T0000Z_gt1/index.html
new file mode 100644
index 0000000..6b70d3d
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_gt1/index.html
@@ -0,0 +1,367 @@
+
+
+ VerPy - 20250128T0000Z_gt1 - 03 February 2026 13:51:32
+
+
+
+
+
+
+
+
+
Ran OK - 03 February 2026 13:51:32
+ (38.052 seconds)
+
+
+
+
+
Image name
+
Images
+
+
+
+ View original plot
+
+
+
+ Small
+
+
+ Medium
+
+
+ Large
+
+
+
↑ PgUp / PgDown ↓
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/output.rst b/app/run_verpy_plotting/20250128T0000Z_gt1/output.rst
new file mode 100644
index 0000000..a2b83ca
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_gt1/output.rst
@@ -0,0 +1,70 @@
+Version
+----------------------------------------
+
+============================== ========================================
+Module Version/Path
+============================== ========================================
+VerPy Path: /home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+VerPy Version: 7.1
+NumPy Version: 1.26.4
+Matplotlib Version: 3.8.4
+Scitools Version: environments/VerPyEnv
+============================== ========================================
+
+Options
+----------------------------------------
+
+
+==================== ==================================================
+ Attribute Value
+==================== ==================================================
+ verbosity 10
+ jobid 20250128T0000Z_gt1
+ plottheme
+ metadata example_viewer
+ output errormap
+ mapopts global
+ type ctc
+ system MET
+ source /data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L72.nc
+ params ['6hr Precipitation Accumulation (mm)']
+ stats ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ thresh ['>1']
+==================== ==================================================
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['7200.0']
+ dates: ['20250128 00:00']
+
+Data has been modified.
+
+
+Data instance::
+
+ params: ['6hr Precipitation Accumulation (mm)']
+ thresh: ['>1']
+ stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+ yc: [0, 1, ..., 1918, 1919]
+ xc: [0, 1, ..., 2558, 2559]
+ fcrs: ['7200.0']
+ dates: ['20250128 00:00']
+
+
+VerPy.errormap
+----------------------------------------
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
+Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/output.ssi b/app/run_verpy_plotting/20250128T0000Z_gt1/output.ssi
new file mode 100644
index 0000000..3afadc6
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_gt1/output.ssi
@@ -0,0 +1,111 @@
+
+
Version
+
+
+
+
+
+
+Module
+Version/Path
+
+
+
+VerPy Path:
+/home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF/VerPy
+
+VerPy Version:
+7.1
+
+NumPy Version:
+1.26.4
+
+Matplotlib Version:
+3.8.4
+
+Scitools Version:
+environments/VerPyEnv
+
+
+
+
+
+
Options
+
+
+
+
+
+
+Attribute
+Value
+
+
+
+verbosity
+10
+
+jobid
+20250128T0000Z_gt1
+
+plottheme
+<VerPy plot theme: "Plotheme: default">
+
+metadata
+example_viewer
+
+output
+errormap
+
+mapopts
+global
+
+type
+ctc
+
+system
+MET
+
+source
+/data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L72.nc
+
+params
+['6hr Precipitation Accumulation (mm)']
+
+stats
+['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+
+thresh
+['>1']
+
+
+
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'Misses', 'False Alarms', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['7200.0']
+dates: ['20250128 00:00']
+
+
Data has been modified.
+
Data instance:
+
+params: ['6hr Precipitation Accumulation (mm)']
+thresh: ['>1']
+stats: ['Hits', 'False Alarms', 'Misses', 'Correct Rejections']
+yc: [0, 1, ..., 1918, 1919]
+xc: [0, 1, ..., 2558, 2559]
+fcrs: ['7200.0']
+dates: ['20250128 00:00']
+
+
+
+
VerPy.errormap
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Hits, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, False Alarms, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Misses, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
Creating 1 plots, with 1 per page, resulting in 1 landscape pages. Title: 6hr Precipitation Accumulation, >1mm, Correct Rejections, T+72, 20250128 00:00, Surface Obs, MO_GLOBAL
+
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/subjobinfo.txt b/app/run_verpy_plotting/20250128T0000Z_gt1/subjobinfo.txt
new file mode 100644
index 0000000..d8135a9
--- /dev/null
+++ b/app/run_verpy_plotting/20250128T0000Z_gt1/subjobinfo.txt
@@ -0,0 +1 @@
+{'runlength': 38.05246710777283, 'rundatetime': '03 February 2026 13:51:32', 'processid': 257366, 'status': 'Ran OK'}
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..c870073
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..6405a02
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..c26ab7e
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png
new file mode 100644
index 0000000..42c5eff
Binary files /dev/null and b/app/run_verpy_plotting/20250128T0000Z_gt1/thumbnail/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png differ
diff --git a/app/run_verpy_plotting/20250128T0000Z_gt1/warnings.txt b/app/run_verpy_plotting/20250128T0000Z_gt1/warnings.txt
new file mode 100644
index 0000000..e69de29
diff --git a/app/run_verpy_plotting/bin/verpy_plotting_precip_monitoring.py b/app/run_verpy_plotting/bin/verpy_plotting_precip_monitoring.py
new file mode 100755
index 0000000..218dff5
--- /dev/null
+++ b/app/run_verpy_plotting/bin/verpy_plotting_precip_monitoring.py
@@ -0,0 +1,123 @@
+import sys
+sys.path.append('/home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF')
+import cartopy.crs as ccrs
+import VerPy
+from VerPy.datafiles import metmaps
+from VerPy.html import create_simple_subjob_viewer
+from VerPy import parameter
+from VerPy import errormap
+from VerPy import stats
+import os
+import argparse
+import numpy as np
+
+metmaps.MET_TRUTHS['ANALYSIS'] = 'Analysis'
+metmaps.MET_TRUTHS['OBS'] = 'Analysis'
+metmaps.MET_TRUTHS['GPM'] = 'Analysis'
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--vdate', required=True, help='Valid date in format YYYYMMDDTHHMM (e.g., 20250123T0000Z)')
+ parser.add_argument('--met-source', required=True, help='Path to MET source directory')
+ parser.add_argument('--outdir', required=True, help='Output directory for plots')
+ parser.add_argument('--fcrs', required=True, help='Forecast lead times as start,stop,step (e.g., 6,60,6)') # Changed to single string
+ parser.add_argument('--truth-type', required=True, help='Type of truth data to use (e.g., gpm)')
+ parser.add_argument('--accumulation', required=True, help='Accumulation period for precipitation (eg 24)')
+ args = parser.parse_args()
+ return args
+
+stats.derived.EVENT_THRESHOLD = 1
+
+args = parse_args()
+vdate = args.vdate
+met_source = args.met_source
+lead_seq = args.fcrs # This is now a string like "6,60,6"
+outdir = args.outdir
+truth_type = args.truth_type.upper() # Convert to uppercase
+accumulation = args.accumulation
+
+# Create output directory if it doesn't exist
+os.makedirs(outdir, exist_ok=True)
+print(f'Output directory: {outdir}')
+
+start, stop, step = map(int, lead_seq.split(','))
+fcrs = [str(x).zfill(2) for x in range(start, stop + step, step)] # Format with leading zeros
+
+sources = [os.path.join(met_source, vdate, f'SERIES_ANALYSIS_OUTPUT_AGAINST_{truth_type}_{accumulation}hr_V{vdate}_L{x}.nc') for x in fcrs]
+print(f'this is sources {sources}')
+
+stats_to_plot = ['053']
+
+## Create stat codes depending on truth type
+if truth_type == 'GPM':
+ verpy_class='6'
+ truth_key=0
+elif truth_type == 'ANALYSIS':
+ verpy_class='6'
+ truth_key=0
+
+stats = [int(verpy_class + stat) for stat in stats_to_plot]
+# Add additional stats that are always included
+# stats.extend([1051, 2051])
+# print(f'this is stats {stats}')
+
+for s in sources:
+ if not os.path.exists(s):
+ print(f"WARNING: File not found, skipping: {s}")
+ continue # Skip to next file instead of raising error
+
+ for area in ['global']:
+ # Extract the forecast lead time from the filename (e.g., 'L48' -> '48')
+ fcr = os.path.basename(s).split('_L')[-1].replace('.nc', '')
+ # Structure jobid so VerPy can extract metadata
+ jid = f'{vdate}_L{fcr}_{area}_{truth_type}_{accumulation}hr'
+ options = {
+ 'jobid': str(jid),
+ 'metadata': 'example_viewer',
+ 'param':(77, 129, int(accumulation)),
+ 'type': 'sl1l2',
+ 'stats': stats,
+ 'source': s,
+ 'system': 'MET',
+ 'names': truth_type,
+ 'truth': truth_key,
+ 'output': 'errormap',
+ 'vrange': [0, 0.5, 1, 2, 5, 10, 15],
+ 'mapopts': [area]} #
+ VerPy.job.run(outdir, options, verbose=False)
+
+# Create viewer after ALL plots are generated
+print(f"Creating viewer in {outdir}")
+unwanted_menus = ['plot type']
+create_simple_subjob_viewer(outdir, 'example_viewer', remove_keys=unwanted_menus)
+
+### for ctc metrics add for t in thresh: and thresh as t, {t.replace(">", "gt") to jid
+#
+# for t in thresh:
+# varying_options = [{
+# 'thresh': t,
+# 'title': '%p, %h, %s, %d Threshold: ' + str(t),}]
+# for opts in varying_options:
+# opts.update(common_options)
+# VerPy.job.run('.', opts)
+
+
+# options = {
+# 'jobid': 'PS47_plots/mibg'+trial_id+'/non_event_fbias' + str(fname),
+# 'type': 'ctc',
+# 'thresh': '>0',
+# 'stats': 7905,
+# 'source': [metfile],
+# 'expid': ['UKV'],
+# 'system': 'MET',
+# 'output': 'errormap',
+# 'names': ['UKV'],
+# 'mapopts': ['!autoscale',
+# [48, -11, 60, 3, ccrs.AzimuthalEquidistant(central_longitude=-2.5, central_latitude=54.9)]],
+# #'plotopts': ['landscape', '2x1'],
+# 'vrange': [0, 0.25, 0.75, 1.25, 2, 5],
+# 'plottheme': 'viridis:5',
+# 'title': '%p, %h, %s, %d' + ' ' + str(fname)}
+# # 'compare_in_fig': 'cases'}
+# VerPy.job.run('.', options),
+
diff --git a/app/run_verpy_plotting/example_viewer.db b/app/run_verpy_plotting/example_viewer.db
new file mode 100644
index 0000000..a8ed682
Binary files /dev/null and b/app/run_verpy_plotting/example_viewer.db differ
diff --git a/app/run_verpy_plotting/example_viewer.html b/app/run_verpy_plotting/example_viewer.html
new file mode 100644
index 0000000..e11a29d
--- /dev/null
+++ b/app/run_verpy_plotting/example_viewer.html
@@ -0,0 +1,168 @@
+
+
+
+ VerPy images
+
+
+
+
+
+
+
View gallery here
+
+
+
+
+
+
+
+
+ Plot Type
+ Fields
+ Thresh
+ Stats
+ Lead time
+ Expid
+ Page
+ Dates
+ Truth
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Lead time:
+
+
+
+
+ Please wait while the page is loading
+ ....
+
+
+
+
+
+Page produced 2026-02-03 (Job output here )
+Page created with ImageMetaTag 0.8.2
+
+
+
\ No newline at end of file
diff --git a/app/run_verpy_plotting/example_viewer.json b/app/run_verpy_plotting/example_viewer.json
new file mode 100644
index 0000000..f1bb34c
--- /dev/null
+++ b/app/run_verpy_plotting/example_viewer.json
@@ -0,0 +1 @@
+{"EM":{"6hr Precipitation Accumulation":{">1mm":{"Correct Rejections":{"T+72":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}},"T+48":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Correct_Rejections_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}}},"False Alarms":{"T+72":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}},"T+48":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_False_Alarms_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}}},"Misses":{"T+72":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}},"T+48":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Misses_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}}},"Hits":{"T+72":{"MO_GLOBAL":{"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L72_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T72_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}},"T+48":{"MO_GLOBAL":{"Page-1":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL-1.png"}},"None":{"20250128 00:00":{"Surface Obs":"20250128T0000Z_L48_gt1/EM_6hr_Precipitation_Accumulation_gt1mm_Hits_T48_20250128_00:00_Surface_Obs_MO_GLOBAL.png"}}}}}}}}}
\ No newline at end of file
diff --git a/app/run_verpy_plotting/gallery.html b/app/run_verpy_plotting/gallery.html
new file mode 100644
index 0000000..42caa99
--- /dev/null
+++ b/app/run_verpy_plotting/gallery.html
@@ -0,0 +1,29 @@
+
+
+ VerPy - Gallery
+
+
+
+./20250128T0000Z_gt1
+
+
+
+
+
+./20250128T0000Z_L48_gt1
+
+
+
+
+
+./20250128T0000Z_L72_gt1
+
+
+
+
+
+
+
+
+
+
diff --git a/app/run_verpy_plotting/imt_dropdown.js b/app/run_verpy_plotting/imt_dropdown.js
new file mode 100644
index 0000000..b5f6ab9
--- /dev/null
+++ b/app/run_verpy_plotting/imt_dropdown.js
@@ -0,0 +1,579 @@
+// ImageMetaTag dropdown menu scripting - vn0.8.2
+// ImageMetaTag is a python package built around a wrapper for savefig in
+// matplotlib, which adds metadata tags to supported image file formats.
+// See https://github.com/SciTools-incubator/image-meta-tag for details.
+//
+// (C) Crown copyright Met Office. All rights reserved.
+// Released under BSD 3-Clause License. See LICENSE for more details.
+
+function read_parse_json_files(json_files, zlib_comp){
+ // reads a list of files that contain the json
+ // data structure. The files can be compressed
+ // using zlib compression. Very large files can
+ // be split into chunks to be consolidated back into
+ // a single structure.
+ if ( json_files.length == 1 ){
+ json = read_parse_json_file(json_files[0], zlib_comp);
+ } else {
+ var json_arr = new Array(json_files.length);
+ for (var i_js=0; i_js < json_files.length; i_js++){
+ json_arr[i_js] = read_parse_json_file(json_files[i_js], zlib_comp);
+ };
+ var json = json_arr[json_arr.length-1];
+ consolidate_json(json, json_arr);
+ };
+ return json;
+}
+
+function read_parse_json_file(json_file, zlib_comp){
+ // reads and parses a single json file
+ if (zlib_comp){
+ // binary compressed string
+ //this_blob = readBinaryFile(json_files[i_js]);
+ this_blob = readBinaryFile(json_file);
+ var json_str = pako.inflate(this_blob, {to: 'string'});
+ } else {
+ // string based compression, or direct string read:
+ var json_str = readTextFile(json_file);
+ };
+ json = JSON.parse(json_str);
+ return json;
+}
+
+function readTextFile(filepath){
+ // reads a text file and returns the content:
+ var request = new XMLHttpRequest();
+ request.open("GET", filepath, false);
+ request.send(null);
+ var returnValue = request.responseText;
+ return returnValue;
+}
+
+function readBinaryFile(url) {
+ var req = new XMLHttpRequest();
+ req.open('GET', url, false);
+ //XHR binary charset opt by Marcus Granado 2006 [http://mgran.blogspot.com]
+ req.overrideMimeType('text\/plain; charset=x-user-defined');
+ req.send(null);
+ if (req.status != 200) return '';
+ return req.responseText;
+}
+
+function consolidate_json(obj, others) {
+ // iteratively moves through a top-level json tree structure, locating
+ // strings that match '**FILE[num]**', where the num is the index of
+ // the other json files to use for that object.
+ for (var property in obj) {
+ if (obj.hasOwnProperty(property)) {
+ if (typeof obj[property] == "object") {
+ // iterate onwards!
+ consolidate_json(obj[property], others );
+ } else if (typeof obj[property] == "string"){
+ var re = new RegExp("^[*]{2}FILE");
+ if (re.test(obj[property])){
+ // now get the number, as string:
+ var thenum = obj[property].replace( /^\D+/g, '');
+ // and then Int:
+ var file_ind = parseInt(thenum, 10);
+ // now replace the object in question with the json object
+ // from the referenced file:
+ obj[property] = others[file_ind];
+ //console.log(property, obj[property], others[file_ind]);
+ };
+ };
+ };
+ };
+};
+
+function imt_main () {
+ // main function, run on load:
+    // parse the input url to see if it overrides the default selection above
+ get_selection();
+ // validate that selection:
+ validate_selected_id(0);
+ // use that selection:
+ apply_selection(0);
+ // add the animation buttons
+ if (anim_sel >= 0) {add_animators();}
+}
+
+function get_selection () {
+ // get inputs from the URL passed in:
+ //console.log("Checking contents of an input URL")
+ var in_url = window.location.search;
+ //console.log(in_url)
+ // if there are inputs, on the url, read them:
+ if (in_url.length > 0) {
+ var parms = in_url.split(url_separator);
+            parms[0]=parms[0].substring(1); // strip off the beginning "?"
+ // if there are the right number of & separated inputs, then use them:
+ if (parms.length == n_deep + 1){
+ if (url_type == "int"){
+ // the url has integers which directly set the selected_id:
+ for (var i_ind=0; i_ind < n_deep; i_ind++){
+ // when the id integer is passed in the url:
+ selected_id[i_ind] = parseInt(parms[i_ind]);
+ }
+ } else {
+ // the url has text which needs decoding:
+ for (var i_ind=0, l_ind=selected_id.length; i_ind < l_ind; i_ind++){
+ for (i_val=0, l_val=key_lists[i_ind].length; i_val < l_val; i_val++){
+ if (parms[i_ind] == convertToSlug(key_lists[i_ind][i_val])){
+ selected_id[i_ind] = i_val;
+ break;
+ }
+ }
+ }
+ }
+ }
+ } else if (selected_id.length == 0) {
+ // we don't have an input id, so take the first workable image
+ alert("The page is corrupted, selected_id has zero length.");
+ }
+}
+
+function apply_selection (start_depth, only_cache) {
+ // applies a selection, from a starting depth.
+ // If supplied, only_cache of true means it will only cache the selected images
+ // rather than display etc.
+
+ // set default value of only_cache:
+ only_cache = only_cache || false;
+
+ // function to run when a selection has been made
+ //console.log("at start of apply_selection, selectid_id:", selected_id)
+ //console.log("apply_selection:", selected_id)
+ // populate the available options, at each depth, for the current selection:
+ var options_at_depth = [];
+ var selected_at_depth = [];
+ // run through the selections, finding the end destination, and the selections available at each level
+ for (var i_d=0; i_d < n_deep; i_d++){
+ // at this i_d, what is the selected key:
+ var selected_key = key_lists[i_d][selected_id[i_d]];
+        // now subset the imt data structure, based on what is selected,
+ // so we can move on to the next level:
+ if (i_d == 0){
+ // store what keys are available at this i_d:
+ var keys_at_depth = Object.keys(imt);
+ //console.log("imt: ", imt)
+ //console.log("selected_key: ", selected_key)
+ // and proceed to subset, for the next level deeper:
+ var imt_subset = imt[selected_key];
+ //console.log(imt_subset)
+ } else {
+ var keys_at_depth = Object.keys(imt_subset);
+ imt_subset = imt_subset[selected_key];
+ }
+ // and make a note of what valid options are available to
+ options_at_depth[i_d] = sorted_by(keys_at_depth, key_lists[i_d]);
+ selected_at_depth[i_d] = selected_key;
+ // for the animator buttons, we need to keep the indices of the valid options
+ // to cycle through:
+ if (i_d == anim_sel){
+ anim_options = [];
+ for (i_opt=0, n_opts=options_at_depth[i_d].length; i_opt < n_opts; i_opt++) {
+ //console.log(".....")
+ //console.log(options_at_depth[i_d][i_opt])
+ //console.log(key_lists[i_d])
+ //console.log(key_lists[i_d].indexOf(options_at_depth[i_d][i_opt]))
+ //console.log(".....")
+ // append the index, in key_lists, of the current option
+ anim_options.push(key_lists[i_d].indexOf(options_at_depth[i_d][i_opt]))
+ // make a note if this is the current selection:
+ if (options_at_depth[i_d][i_opt] == selected_key){
+ anim_ind = i_opt;
+ }
+ }
+ }
+ }
+ if (only_cache){
+ // if we only want to cache/pre-load the current selection then:
+ cache_payload(imt_subset);
+ } else {
+ // if we have got here then we actually have the payload
+ apply_payload(imt_subset);
+ // write the selectors, to change the next page
+ update_selectors(options_at_depth, selected_at_depth, start_depth);
+ // write out the url
+ write_url_to_div();
+
+ // now the the_image div is updated, the user should be happy, so we can
+ // go backwards and forwards on the animator buttons to preload images:
+ if (anim_sel >= 0) {
+ // store the selected_id at this stage. This is the image the user wants, so mustn't be lost!
+ var stored_id = selected_id.slice();
+ // step the animator forward, but only_cache=true as we only want to cache the image:
+ animator_step_forward(true);
+ // reset the selected_id:
+ selected_id = stored_id.slice();
+ // now step back:
+ animator_step_back(true);
+ // and reset the selected_id once more:
+ selected_id = stored_id.slice();
+ };
+ };
+}
+
+function apply_payload( payload ) {
+ // applies the payload of the selected image(s) to the_image:
+ // set the string to use the the_image div:
+ var the_file = "Sorry, there is no image for that selection.
";
+ // set the file, and break the loop:
+ if (Array.isArray(payload)){
+ // how many images we have depends on what the last image is used for:
+ if (last_img_slider && payload.length > 1){
+ if (!last_img_still_show){
+ // if the last image is a slider then it won't be used as an
+ // image directly, so doesn't count:
+ var n_imgs = payload.length - 1;
+ var this_img_slider = Array(n_imgs).fill(true);
+ } else {
+ // the last image is a slider underlay, but we want to show it:
+ var n_imgs = payload.length;
+ var this_img_slider = Array(n_imgs).fill(true);
+ this_img_slider[n_imgs-1] = false
+ }
+ var slider_background = payload[payload.length - 1];
+ } else {
+ // no sliders so show all the images
+ var n_imgs = payload.length;
+ var this_img_slider = Array(n_imgs).fill(false);
+ }
+
+ // the right number of rows for a squarish box is the floor of the square root of the number of images:
+ if (n_imgs <= 3){
+ var n_cols = n_imgs;
+ //var n_rows = 1;
+ } else {
+ var n_cols = Math.ceil(Math.sqrt(n_imgs));
+ //var n_rows = Math.ceil(n_imgs / n_cols);
+ }
+ //the_file = "An array of " + n_imgs.toString() + " files goes here";
+ //the_file += ", in " + n_rows.toString() + " rows";
+ //the_file += " and " + n_cols.toString() + " columns";
+        // TODO: sort out the screen width and set the image widths appropriately, so it fits the screen size:
+ the_file = "
";
+ for (var i_img=0; i_img < n_imgs; i_img++){
+ if (i_img % n_cols == 0){ the_file += ""}
+ if (this_img_slider[i_img]){
+ the_file += "" + apply_slider(payload[i_img], slider_background, i_img) + " ";
+ } else {
+ the_file += " ";
+ }
+ }
+ the_file += "
";
+ } else {
+ the_file = "
";
+ }
+ // now set the_image div:
+ var _ = document.getElementById("the_image");
+ _.innerHTML = the_file;
+}
+
+function apply_slider(foreg, backg, i_img){
+ // given an input of a foreground image and a background image,
+ // constructs a slider between them
+ // picking up the default value from the slider
+ var slider = document.getElementById("slider_default");
+ var slider_def = slider['value'];
+ out = ''
+ out += ' '
+ out += ' '
+ out += ' '
+ return out;
+}
+
+function cache_payload( payload ){
+ // given the same input as apply_payload, this simply caches the image(s) instead:
+ //
+ if (Array.isArray(payload)){
+ var img_list = payload;
+ } else {
+ var img_list = [ payload ];
+ };
+ var n_imgs = img_list.length;
+ // now loop through the images and cache them:
+ for (var i_img=0; i_img < n_imgs; i_img++){
+ // just create an Image instance, with the src set, and it will be fetched
+ // in the background for when it's needed.
+ var cache_image = new Image();
+ cache_image.src = img_list[i_img];
+ };
+};
+
+function update_selectors(options_at_depth, selected_at_depth, start_depth) {
+ // updates the selectors with the choices valid for the current selection
+ for (var depth=start_depth, len=options_at_depth.length; depth < len; depth++){
+ // rewrite the selector for this depth:
+ update_selector(depth, options_at_depth[depth], selected_at_depth[depth]);
+ }
+}
+
+function update_selector(depth, options, selected) {
+ // updates a selector at a particular depth, with a set of options
+ // and the selected value as the current selection:
+
+ //console.log("updating sel", depth);
+ //console.log(" at div", target_div);
+ //console.log(" with options", options);
+ //console.log(" and selected val", selected);
+ if (show_singleton_selectors == 1 || key_lists[depth].length > 1){
+ target_div = key_to_selector[depth];
+
+ // set up the text to define the selector:
+ sel_text = "\n";
+ // find which optgroup is for the current depth:
+ var optgroup_depth = optgroups[depth];
+ // the number of optgroups, minus one to account for 'imt_optgroup_order'
+ n_optgroups = Object.keys(optgroup_depth).length - 1;
+ if ( n_optgroups > 0 ){
+ // now loop over the actual optgroups:
+ for (var i_optgrp=0, n_optgroups; i_optgrp";
+ // now within the optgroup, add the options:
+ for (var i_opt=0, n_opt=optgroup.length; i_opt< n_opt; i_opt++){
+ // loop over the options, and write out a line for each one:
+ //
+ // first determine if this element of the group is valid for the current
+ // selection:
+ var opt_in_options=Boolean(false);
+ for (var j_opt=0, len_j=options.length; j_opt < len_j; j_opt++){
+ // first, get the index in key_lists[depth] to which i_opt refers
+ // as not every option is used in every selection:
+ if (options[j_opt] == optgroup[i_opt]){
+ opt_in_options=Boolean(true);
+ break;
+ };
+ };
+ if (opt_in_options){
+ // now work out the j_opt that's index of key_lists[depth], because that's what's needed
+ // for the selector:
+ for (var j_opt=0, len_j=key_lists[depth].length; j_opt < len_j; j_opt++){
+ if (key_lists[depth][j_opt] == optgroup[i_opt]){break;};
+ };
+ if (optgroup[i_opt] == selected){
+ sel_text += " "+optgroup[i_opt]+" \n";
+ } else {
+ sel_text += " "+optgroup[i_opt]+" \n";
+ };
+ };
+ };
+ // close the optgroup:
+ sel_text += " ";
+ };
+ // now add any residuals:
+ var resids = optgroup_redisual[depth];
+ for (var i_opt=0, len=resids.length; i_opt < len; i_opt++){
+ // loop over the options, and write out a line for each one:
+ for (var j_opt=0, len_j=key_lists[depth].length; j_opt < len_j; j_opt++){
+ // first, get the index in key_lists[depth] to which i_opt refers
+ // as not every option is used in every selection:
+ if (key_lists[depth][j_opt] == resids[i_opt]){break};
+ }
+ if (resids[i_opt] == selected){
+ sel_text += " "+resids[i_opt]+" \n";
+ } else {
+ sel_text += " "+resids[i_opt]+" \n";
+ };
+ };
+ } else {
+ for (var i_opt=0, len=options.length; i_opt < len; i_opt++){
+ // loop over the options, and write out a line for each one:
+ for (var j_opt=0, len_j=key_lists[depth].length; j_opt < len_j; j_opt++){
+ // first, get the index in key_lists[depth] to which i_opt refers
+ // as not every option is used in every selection:
+ if (key_lists[depth][j_opt] == options[i_opt]){break};
+ };
+ if (options[i_opt] == selected){
+ sel_text += " "+options[i_opt]+" \n";
+ } else {
+ sel_text += " "+options[i_opt]+" \n";
+ };
+ };
+ };
+ // finish off the selector:
+ sel_text += " "
+ // now set the sel div:
+ var _ = document.getElementById("sel"+depth)
+ _.innerHTML = sel_text;
+ //console.log(sel_text)
+ }; // closes the test on whether this selector is to be displayed
+}
+
+function OnSelected(depth){
+ // acts to apply the selection changes for a given selector
+ //console.log("OnSelected depth:", depth);
+ new_value = document.getElementById("select_"+depth).value;
+ //console.log(selected_id);
+ selected_id[depth] = parseInt(new_value);
+ //console.log(selected_id);
+ validate_selected_id(depth+1);
+ //console.log(selected_id);
+ apply_selection(0);
+}
+
+function validate_selected_id(start_depth) {
+ // validates that a selected_id is valid, starting from a given depth
+ //console.log("validating from depth: ", start_depth)
+ // first of all, get the imt information, subsetted to the start_depth
+ //console.log(start_depth)
+ if (start_depth == 0) {
+ var imt_subset = imt;
+ } else {
+ for (var i_d=0; i_d < start_depth; i_d++){
+ var selected_key = key_lists[i_d][selected_id[i_d]];
+ if (i_d == 0){
+ var imt_subset = imt[selected_key];
+ } else {
+ imt_subset = imt_subset[selected_key];
+ }
+ }
+ }
+ //console.log("imt_subset, validation:", imt_subset)
+ for (var i_d=start_depth; i_d < n_deep; i_d++){
+ // at this i_d, what is the selected key:
+ var selected_key = key_lists[i_d][selected_id[i_d]]
+        // now subset the imt data structure, based on what is selected,
+ keys_this_depth = Object.keys(imt_subset)
+ //console.log(keys_this_depth, selected_key)
+ //console.log(keys_this_depth.indexOf(selected_key))
+ if (keys_this_depth.indexOf(selected_key) == -1){
+ // the currently held selection is NOT valid, so replace it:
+ //
+ // TODO: sometimes there might be a requirement to do
+ // more clever stuff here. For now:
+ // find from the sorted list of valid options:
+ keys_this_depth = sorted_by(keys_this_depth, key_lists[i_d])
+ // and pick the first one:
+ selected_key = keys_this_depth[0]
+ // and change the selected_id to point to it:
+ selected_id[i_d] = key_lists[i_d].indexOf(selected_key)
+ }
+ // the selected_id is valid for the previous selections, so subset imt and proceed:
+ imt_subset = imt_subset[selected_key]
+ }
+}
+
+function sorted_by(in_list, order_list) {
+    // sorts an in_list according to an order_list
+ out_list = [];
+ order_list.forEach(function(key) {
+ var found = false;
+ in_list = in_list.filter(function(item) {
+ if(!found && item == key) {
+ out_list.push(item);
+ found = true;
+ return false;
+ } else {
+ return true;
+ }
+ })
+ })
+ //console.log("sorted list: ", out_list)
+ return out_list
+}
+
+function write_url_to_div() {
+ // sets the URL to the page in the div:
+ //
+ // split on question mark: stuff after this is a javascript input:
+ qm_split = document.location.toString().split("?");
+ // split up the frame url, with "/"
+ frame_slashes = qm_split[0].split("/");
+ //alert(frame_slashes);
+ // construct the output url:
+ if (tab_name.localeCompare("None")){
+ out_url = pagename +"?"+ tab_name + url_separator;
+ }
+ else {
+ out_url = pagename +"?";
+ }
+ // add the new page's script inputs onto the end, according to the required format:
+ if (url_type == "int"){
+ // output url just has integers, directly setting the selected_id:
+ for (var i_ind=0; i_ind < n_deep; i_ind++){
+ out_url = out_url + selected_id[i_ind].toString() + url_separator ;
+ }
+ } else {
+ for (var i_ind=0; i_ind=0; i_slash--){
+ out_url = frame_slashes[i_slash]+"/"+out_url;
+ }
+ }
+ // and set the content of the div that holds the URL:
+ var _ = document.getElementById("the_url");
+ _.innerHTML = "To link to this page use this URL: "+out_url+"
";
+}
+
+function convertToSlug(Text){
+ // converts text to text suitable to be used in a URL (a slug):
+ return Text
+ .toLowerCase()
+ .replace(/[^\w ]+/g,'')
+ .replace(/ +/g,'-')
+ ;
+}
+
+function add_animators() {
+ // adds in the appropriate text to the animator buttons:
+ //
+ // a pair of buttons that each call a stepping function:
+ animator_content1 = "Step back ";
+ animator_content2 = "Step forward ";
+ // and set the content of the div that holds the URL:
+ var _1 = document.getElementById("animator1");
+ _1.innerHTML = animator_content1;
+ var _2 = document.getElementById("animator2");
+ _2.innerHTML = animator_content2;
+}
+
+function animator_step_back(only_cache){
+ // animator, stepping backwards:
+
+ // set default value of only_cache:
+ only_cache = only_cache || false;
+
+ // look for the next selected id:
+ step_selected_id(-1 * anim_dir);
+ // validate from the change:
+ validate_selected_id(anim_sel+1);
+ // use that selection:
+ apply_selection(0, only_cache);
+}
+function animator_step_forward(only_cache){
+ // animator, stepping forwards:
+
+ // set default value of only_cache:
+ only_cache = only_cache || false;
+
+ // look for the next selected id:
+ step_selected_id(1 * anim_dir);
+ // validate from the change:
+ validate_selected_id(anim_sel+1);
+ // use that selection:
+ apply_selection(0, only_cache);
+}
+function step_selected_id(incr){
+ // function to step through to the next selected_id
+ var current_ind = anim_options.indexOf(selected_id[anim_sel]);
+ var new_ind = current_ind + incr;
+ if (new_ind < 0){
+ new_ind = anim_options.length -1;
+ } else if (new_ind >= anim_options.length){
+ new_ind = 0;
+ }
+ selected_id[anim_sel] = anim_options[new_ind];
+}
diff --git a/app/run_verpy_plotting/index.html b/app/run_verpy_plotting/index.html
new file mode 100644
index 0000000..2ddd349
--- /dev/null
+++ b/app/run_verpy_plotting/index.html
@@ -0,0 +1,452 @@
+
+
+ VerPy - run_verpy_plotting - 03 February 2026 14:31:42
+
+
+
+
+
+
+
+
run_verpy_plotting
+ Ran OK - 03 February 2026 14:31:42
+ (34.504 seconds)
+
+
+
+
+
Summary
+
+
+
+
Image name
+
Images
+
+
+
+ View original plot
+
+
+
+ Small
+
+
+ Medium
+
+
+ Large
+
+
+
↑ PgUp / PgDown ↓
+
+
+ 20250128T0000Z_gt1
+
+
+
+
+ 20250128T0000Z_L48_gt1
+
+
+
+
+ 20250128T0000Z_L72_gt1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/app/run_verpy_plotting/options.txt b/app/run_verpy_plotting/options.txt
new file mode 100644
index 0000000..593affb
--- /dev/null
+++ b/app/run_verpy_plotting/options.txt
@@ -0,0 +1 @@
+[{'jobid': '20250128T0000Z_L72_gt1', 'metadata': 'example_viewer', 'param': (77, 129, 0), 'type': 'ctc', 'thresh': '>1', 'stats': [7921, 7922, 7923, 7924], 'source': '/data/scratch/sebastian.cole/global_precip_stand_alone/20250128T0000Z/GPM_24hr_V20250128T0000Z_L72.nc', 'system': 'MET', 'output': 'errormap', 'mapopts': ['global']}]
diff --git a/app/run_verpy_plotting/output.rst b/app/run_verpy_plotting/output.rst
new file mode 100644
index 0000000..e69de29
diff --git a/app/run_verpy_plotting/output.ssi b/app/run_verpy_plotting/output.ssi
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/run_verpy_plotting/output.ssi
@@ -0,0 +1 @@
+
diff --git a/app/run_verpy_plotting/rose-app.conf b/app/run_verpy_plotting/rose-app.conf
new file mode 100644
index 0000000..73c29e9
--- /dev/null
+++ b/app/run_verpy_plotting/rose-app.conf
@@ -0,0 +1,5 @@
+[command]
+default=verpy-run python $ROSE_SUITE_DIR/app/run_verpy_plotting/bin/verpy_plotting_precip_monitoring.py --vdate $CYLC_TASK_CYCLE_POINT --fcrs $LEAD_SEQ --met-source $MET_SOURCE --outdir $OUTDIR --truth-type $TRUTH --accumulation $ACCUM_PERIOD
+
+[env]
+# Environment variables are already set in flow.cylc
\ No newline at end of file
diff --git a/bin/verpy-run b/bin/verpy-run
new file mode 100755
index 0000000..5c19f50
--- /dev/null
+++ b/bin/verpy-run
@@ -0,0 +1,14 @@
+#!/bin/bash -l
+#
+# Usage verpy-run CMD_WITHOPTS
+#
+# ENVIRONMENT
+# VERPY_PATH The version of the software stack you want to run
+
+module use /data/users/cfsb/modulefiles
+
+export VERPY_DIR='/home/users/clare.bysouth/frcb/VERSUS/r4416_749_METNetCDF'
+
+module load verpy/developer
+
+exec "$@"
\ No newline at end of file
diff --git a/flow.cylc b/flow.cylc
index 8e5112d..1309aad 100755
--- a/flow.cylc
+++ b/flow.cylc
@@ -1,131 +1,225 @@
#!jinja2
+{% set period_schedules = {6: "PT6H", 24: "T00,T12"} %}
[scheduler]
UTC mode = True
[[events]]
abort on stall timeout = True
- mail events = inactivity, stalled, abort on stall timeout
+ mail events = inactivity, stalled
workflow timeout = P1D
+[task parameters]
+ periods = 6, 24
+ truths = gpm, analysis
+
[scheduling]
initial cycle point = {{START_CYCLE_POINT}}
{% if FINAL_CYCLE_POINT | default(False) %}
- final cycle point = {{FINAL_CYCLE_POINT}}
+ final cycle point = {{FINAL_CYCLE_POINT}}
{% endif %}
+ runahead limit = PT3H
- [[graph]]
- PT6H = """
- (get_data & get_gpm) => (process_analysis & process_periods) => run_series_analysis
+ [[graph]]
+ # Initial prep tasks (first 00Z and 12Z only)
+ R1/T00 = """
+ prep_gpm & prep_prev_fcsts => get_data & get_gpm => process_periods_with_prep => process_analysis_with_prep
"""
- ## For running live, and ! before final cylce point in rose-suite.conf ##
- ##PT6H = """
- ## @wall_clock => (get_data & get_gpm) => (process_analysis & process_periods) => run_series_analysis
- ##"""
- #+PT6H/PT6H = """
- # process_periods[-PT6H] => get_data
- #"""
- ##PT24H = """
- # ## process_periods => run_series_analysis
- ## """
- {% if FINAL_CYCLE_POINT | default(False) %}
- R1/{{FINAL_CYCLE_POINT}} = """
- run_series_analysis
+ R1/T12 = """
+ prep_gpm & prep_prev_fcsts => get_data & get_gpm => process_periods_with_prep => process_analysis_with_prep
"""
- {% endif %}
-[runtime]
+ # 6hr period: runs every 6 hours, both truths
+ PT6H!{{START_CYCLE_POINT}} = """
+ get_data => process_analysis => run_series_analysis
+ get_gpm => process_periods => run_series_analysis
+ run_series_analysis => run_verpy_plotting
+ """
+ # 24hr period at 00Z: both analysis and GPM
+ T00!{{START_CYCLE_POINT}} = """
+ get_data => process_analysis => run_series_analysis
+ get_gpm => process_periods => run_series_analysis
+ run_series_analysis => run_verpy_plotting
+ """
+
+ # 24hr period at 12Z: GPM only (no analysis)
+ T12!{{START_CYCLE_POINT}} = """
+ get_data & get_gpm => process_periods
+ """
+
+[runtime]
[[root]]
script = rose task-run
-
[[[environment]]]
SCITOOLS = {{SCITOOLS}}
OEMPLOT_CONDA_ENV = {{OEMPLOT_CONDA_ENV}}
+ [[SET_MAX_LEAD]]
+ pre-script = """
+ CYCLE_HOUR=${CYLC_TASK_CYCLE_POINT:9:2}
+ if [[ "$ACCUM_PERIOD" == "6" ]]; then
+ export MAX_LEAD=60
+ elif [[ "$ACCUM_PERIOD" == "24" ]]; then
+ export MAX_LEAD=144
+ fi
+ echo "Setting MAX_LEAD=$MAX_LEAD for period=$ACCUM_PERIOD at ${CYCLE_HOUR}Z"
+ """
- [[get_data]]
- execution time limit = PT4H
+ [[prep_prev_fcsts]]
+ inherit = SET_MAX_LEAD
+ execution time limit = PT6H
execution retry delays = 12*PT30M
-
[[[environment]]]
- START_CYCLE_POINT={{START_CYCLE_POINT}}
- DATADIR={{DATA_DIR}}
-
+ ROSE_TASK_APP = prep_prev_fcsts
+ START_CYCLE_POINT = {{START_CYCLE_POINT}}
+ DATADIR = {{DATA_DIR}}
+ ACCUM_PERIOD = %(periods)s
[[[directives]]]
- -l mem=2gb
+ -l mem = 2gb
-q = shared
-
- [[get_gpm]]
- platform = spice
-
- env-script = """
- # provide an environment for a cycling suite task
- eval $(rose task-env)
- """
+ [[prep_gpm]]
+ inherit = SET_MAX_LEAD
+ platform = spice
+ env-script = "eval $(rose task-env)"
[[[environment]]]
- ROSE_TASK_APP=get_gpm
- DATADIR={{DATA_DIR}}
- GPM_DATA_DIR={{GPM_DATA_DIR}}
- GPM_OBS_TYPE={{GPM_OBS_TYPE}}
- OUTPUT_DATA={{PROCESSED_DIR}}
- CYLCING_ON={{CYLCING_ON}}
- MAX_LEAD={{MAX_LEAD}}
- ACCUM_PERIOD={{ACCUM_PERIOD}}
-
+ ROSE_TASK_APP = prep_gpm
+ DATADIR = {{DATA_DIR}}
+ GPM_DATA_DIR = {{GPM_DATA_DIR}}
+ GPM_OBS_TYPE = {{GPM_OBS_TYPE}}
+ OUTPUT_DATA = {{PROCESSED_DIR}}
+ ACCUM_PERIOD = %(periods)s
+ CYCLING_ON = {{PREP}}
[[[directives]]]
- --mem=10G
- --time=10
+ --mem = 10G
+ --time = 10
+ [[get_data]]
+ inherit = SET_MAX_LEAD
+ execution time limit = PT4H
+ execution retry delays = 12*PT30M
+ [[[environment]]]
+ ROSE_TASK_APP = get_data
+ DATADIR = {{DATA_DIR}}
+ ACCUM_PERIOD = %(periods)s
+ [[[directives]]]
+ -l mem = 2gb
+ -q = shared
- [[process_analysis]]
+ [[get_gpm]]
+ inherit = SET_MAX_LEAD
platform = spice
-
+ env-script = "eval $(rose task-env)"
[[[environment]]]
- OUTPUT_DIR={{PROCESSED_DIR}}
- DATADIR={{DATA_DIR}}
-
-
- [[[directives]]]
- --mem = 20G
- --time = 90
-
+ ROSE_TASK_APP = prep_gpm
+ DATADIR = {{DATA_DIR}}
+ GPM_DATA_DIR = {{GPM_DATA_DIR}}
+ GPM_OBS_TYPE = {{GPM_OBS_TYPE}}
+ OUTPUT_DATA = {{PROCESSED_DIR}}
+ CYCLING_ON = {{CYCLING_ON}}
+ ACCUM_PERIOD = %(periods)s
+ [[[directives]]]
+ --mem = 10G
+ --time = 10
- [[process_periods]]
+ [[PROCESS]]
platform = spice
-
[[[environment]]]
- OUTPUT_DIR={{PROCESSED_DIR}}
- DATADIR={{DATA_DIR}}
+ OUTPUT_DIR = {{PROCESSED_DIR}}
+ DATADIR = {{DATA_DIR}}
+ ACCUM_PERIOD = %(periods)s
+ [[[directives]]]
+ --mem = 20G
+ --time = 90
+ [[process_analysis]]
+ inherit = PROCESS
+ [[[environment]]]
+ ROSE_TASK_APP = process_analysis
+
+ [[process_analysis_with_prep]]
+ inherit = PROCESS, SET_MAX_LEAD
+ script = """
+ export PREP_HOURS=${MAX_LEAD:-60}
+ echo "Setting PREP_HOURS=$PREP_HOURS for analysis backfill"
+ rose task-run
+ """
+ [[[environment]]]
+ ROSE_TASK_APP = process_analysis
- [[[directives]]]
- --mem = 20G
- --time = 90
+ [[process_periods]]
+ inherit = PROCESS
+ [[[environment]]]
+ ROSE_TASK_APP = process_periods
+
+ [[process_periods_with_prep]]
+ inherit = PROCESS, SET_MAX_LEAD
+ script = """
+ export PREP_HOURS=${MAX_LEAD:-144}
+ echo "Setting PREP_HOURS=$PREP_HOURS from MAX_LEAD"
+ rose task-run
+ """
+ [[[environment]]]
+ ROSE_TASK_APP = process_periods
- [[run_met]]
+ [[run_series_analysis]]
platform = spice
+ env-script = "eval $(rose task-env)"
+ pre-script = """
+ CYCLE_HOUR=${CYLC_TASK_CYCLE_POINT:9:2}
+ if [[ "$ACCUM_PERIOD" == "6" ]]; then
+ export LEAD_SEQ=6,60,6
+ export MAX_LEAD=60
+ elif [[ "$ACCUM_PERIOD" == "24" ]]; then
+ if [[ "$CYCLE_HOUR" == "00" ]]; then
+ export LEAD_SEQ=36,144,12
+ else
+ export LEAD_SEQ=36,132,24
+ fi
+ export MAX_LEAD=144
+ fi
+ echo "Setting LEAD_SEQ=$LEAD_SEQ and MAX_LEAD=$MAX_LEAD for period=$ACCUM_PERIOD at ${CYCLE_HOUR}Z"
+ """
+ [[[environment]]]
+ ROSE_TASK_APP = run_series_analysis
+ FINAL_CYCLE_POINT = {{FINAL_CYCLE_POINT}}
+ ACCUM_DATA_DIR = {{PROCESSED_DIR}}
+ DATADIR = {{DATA_DIR}}
+ OUTPUT_BASE = {{OUTPUT_BASE}}
+ AREA_MASKS = {{AREA_MASKS}}
+ ACCUM_PERIOD = %(periods)s
+ TRUTH = %(truths)s
+ MET_OUTPUT_DIR = {{MET_OUTPUT_DIR}}
+ [[[directives]]]
+ --mem = 20G
+ --time = 360
- [[run_series_analysis]]
+ [[run_verpy_plotting]]
platform = spice
- env-script = """
- # provide an environment for a cycling suite task
- eval $(rose task-env)
+ env-script = "eval $(rose task-env)"
+ pre-script = """
+ CYCLE_HOUR=${CYLC_TASK_CYCLE_POINT:9:2}
+ if [[ "$ACCUM_PERIOD" == "6" ]]; then
+ export LEAD_SEQ=6,60,6
+ elif [[ "$ACCUM_PERIOD" == "24" ]]; then
+ if [[ "$CYCLE_HOUR" == "00" ]]; then
+ export LEAD_SEQ=12,144,12
+ else
+ export LEAD_SEQ=36,132,24
+ fi
+ fi
+ echo "Setting LEAD_SEQ=$LEAD_SEQ for VerPy plotting, period=$ACCUM_PERIOD at ${CYCLE_HOUR}Z"
"""
[[[environment]]]
- START_CYCLE_POINT={{START_CYCLE_POINT}}
- FINAL_CYCLE_POINT={{FINAL_CYCLE_POINT}}
- ACCUM_DATA_DIR={{PROCESSED_DIR}}
- DATADIR={{DATA_DIR}}
- OUTPUT_BASE={{OUTPUT_BASE}}
- AREA_MASKS={{AREA_MASKS}}
-
+ ROSE_TASK_APP = run_verpy_plotting
+ MET_SOURCE = {{MET_OUTPUT_DIR}}
+ OUTDIR = {{OUTPUT_BASE}}/verpy_plots
+ ACCUM_PERIOD = %(periods)s
+ TRUTH = %(truths)s
[[[directives]]]
- --mem = 20G
- --time = 90
-
-
+ --mem = 10G
+ --time = 60
[[housekeep]]
[[[environment]]]
- ROSE_TASK_APP=housekeep
\ No newline at end of file
+ ROSE_TASK_APP = housekeep
\ No newline at end of file
diff --git a/rose-suite.conf b/rose-suite.conf
index b654c2b..37681f4 100755
--- a/rose-suite.conf
+++ b/rose-suite.conf
@@ -1,9 +1,9 @@
[template variables]
-START_CYCLE_POINT='20241121T1200Z'
-FINAL_CYCLE_POINT='20241121T1200Z'
+START_CYCLE_POINT='20250126T0000Z'
+FINAL_CYCLE_POINT='20250129T0000Z'
SCITOOLS='scitools/default-next'
OEMPLOT_CONDA_ENV='/home/users/uktstbed/conda_envs/oemplotlib/py38_iris32_20250124'
-CYLCING_ON='VT'
+CYCLING_ON='VT'
TRIAL1='mi-bg180'
TRIAL2='mi-bg181'
DATA_DIR='/data/users/sebastian.cole/GM_precip'
@@ -12,7 +12,8 @@ OUTPUT_BASE='/data/scratch/sebastian.cole/GM_precip_testing'
GPM_DATA_DIR='/data/users/gpm_imerg'
GPM_OBS_TYPE='GPM_NRTlate'
###AREA_MASKS='/data/users/cfver/MET_Repos/AreaMasks/poly_euro.nc'
-AREA_MASKS=['/data/users/sebastian.cole/GM_precip/poly_euro.nc', '/data/users/sebastian.cole/GM_precip/poly_globe.nc']
-ACCUM_PERIOD=6
-MAX_LEAD=72
+AREA_MASKS=['/data/users/sebastian.cole/GM_precip/poly_euro.nc', '/data/users/sebastian.cole/GM_precip/poly_globe.nc']
+PREP='DT'
+MET_OUTPUT_DIR='/data/scratch/sebastian.cole/global_precip_stand_alone'
+
\ No newline at end of file