Skip to content

TECHDEBT: Cronjob Fix #1536

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 12 commits into from
Mar 28, 2025
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ telemetry-cronjob: ## Run the telemetry cronjob
@echo "==============================================="
@echo "Telemetry Cronjob"
@echo "==============================================="
@docker compose exec api npx ts-node src/cronjobs/telemetry/index --_test_maxDevices 4 -- $(args)
@docker compose exec api npx ts-node src/cronjobs/telemetry/index --deviceLimit=2 -- $(args)
## ------------------------------------------------------------------------------
## Database migration commands
## ------------------------------------------------------------------------------
Expand Down
4 changes: 4 additions & 0 deletions api/.pipeline/config.js
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ const phases = {
changeId: deployChangeId,
telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight
telemetryCronjobDisabled: !isStaticDeployment,
telemetryCronjobDeviceLimit: (isStaticDeployment && '-1') || '2', // For PRs, limit the number of devices to 2 per vendor
suffix: `-dev-${deployChangeId}`,
instance: `${name}-dev-${deployChangeId}`,
version: `${deployChangeId}-${changeId}`,
Expand Down Expand Up @@ -117,6 +118,7 @@ const phases = {
changeId: deployChangeId,
telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight
telemetryCronjobDisabled: !isStaticDeployment,
telemetryCronjobDeviceLimit: '-1', // -1 means no limit
suffix: `-test`,
instance: `${name}-test`,
version: `${version}`,
Expand Down Expand Up @@ -161,6 +163,7 @@ const phases = {
changeId: deployChangeId,
telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight
telemetryCronjobDisabled: !isStaticDeployment,
telemetryCronjobDeviceLimit: '-1', // -1 means no limit
suffix: `-test-spi`,
instance: `${name}-spi-test-spi`,
version: `${version}`,
Expand Down Expand Up @@ -205,6 +208,7 @@ const phases = {
changeId: deployChangeId,
telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight
telemetryCronjobDisabled: !isStaticDeployment,
telemetryCronjobDeviceLimit: '-1', // -1 means no limit
suffix: `-prod`,
instance: `${name}-prod`,
version: `${version}`,
Expand Down
1 change: 1 addition & 0 deletions api/.pipeline/lib/api.deploy.js
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ const apiDeploy = async (settings) => {
// Cronjobs
TELEMETRY_CRONJOB_SCHEDULE: phases[phase].telemetryCronjobSchedule,
TELEMETRY_CRONJOB_DISABLED: phases[phase].telemetryCronjobDisabled,
TELEMETRY_CRONJOB_DEVICE_LIMIT: phases[phase].telemetryCronjobDeviceLimit,
// Node
NODE_ENV: phases[phase].nodeEnv,
NODE_OPTIONS: phases[phase].nodeOptions,
Expand Down
20 changes: 19 additions & 1 deletion api/.pipeline/templates/api.dc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,9 @@ parameters:
- name: TELEMETRY_CRONJOB_DISABLED
description: Boolean flag to disable the cronjob, only static deployments should run on schedule.
value: 'true'
- name: TELEMETRY_CRONJOB_DEVICE_LIMIT
description: Limit the number of devices to process in the telemetry cronjob. Used for PR cronjob deployments.
value: '-1'
- name: TELEMETRY_SECRET
description: The name of the Openshift Biohubbc telemetry secret
value: biohubbc-telemetry
Expand Down Expand Up @@ -557,6 +560,7 @@ objects:
role: telemetry-cronjob
spec:
schedule: ${TELEMETRY_CRONJOB_SCHEDULE}
# By default the PR cronjobs are disabled
suspend: ${{TELEMETRY_CRONJOB_DISABLED}}
concurrencyPolicy: 'Forbid'
successfulJobsHistoryLimit: 1
Expand Down Expand Up @@ -634,8 +638,22 @@ objects:
value: ${API_RESPONSE_VALIDATION_ENABLED}
- name: DATABASE_RESPONSE_VALIDATION_ENABLED
value: ${DATABASE_RESPONSE_VALIDATION_ENABLED}
command: ['npm', 'run', 'telemetry-cronjob', '-- "--batchSize 1000 --concurrently 100"']
command: [
'node',
'src/cronjobs/telemetry/index',
'--',
'--batchSize=1000',
'--concurrently=100',
'--deviceLimit=${TELEMETRY_CRONJOB_DEVICE_LIMIT}',
]
volumeMounts:
- name: ${NAME}${SUFFIX}
mountPath: /opt/app-root/src/data
restartPolicy: Never
volumes:
- name: ${NAME}${SUFFIX}
persistentVolumeClaim:
claimName: ${NAME}${SUFFIX}

# Disable the HPA for now, as it is preferable to run an exact number of pods (e.g. min:2, max:2)
# - kind: HorizontalPodAutoscaler
Expand Down
2 changes: 2 additions & 0 deletions api/src/cronjobs/telemetry/cronjob.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ describe('Telemetry Cronjob', () => {
sinon.stub(cronjob, 'parseArguments').returns({
concurrently: 2,
batchSize: 4,
deviceLimit: -1,
startDate: undefined,
endDate: undefined
});
Expand Down Expand Up @@ -70,6 +71,7 @@ describe('Telemetry Cronjob', () => {
sinon.stub(cronjob, 'parseArguments').returns({
concurrently: 2,
batchSize: 4,
deviceLimit: -1,
startDate: undefined,
endDate: undefined
});
Expand Down
40 changes: 27 additions & 13 deletions api/src/cronjobs/telemetry/cronjob.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,17 @@

const defaultLog = getLogger('telemetry-cronjob');

// Process all devices by default
const PROCESS_ALL_DEVICES = -1;

/**
* Telemetry Cronjob: Handles fetching Vectronic and Lotek telemetry and inserting it into the database.
*
* Information:
*
* How to run:
* - Default: `npm run telemetry-cronjob` // defaults to: concurrently = 100 and batchSize = 1000
* - CLI args: `npm run telemetry-cronjob -- --concurrently 100 --batchSize 1000 --startDate 2021-01-01 --endDate 2021-01-31`
* - Default: `npm run telemetry-cronjob` // defaults to: concurrently=100, batchSize=1000 and deviceLimit=-1 (all devices)
* - CLI args: `npm run telemetry-cronjob -- --concurrently=100 --batchSize=1000 --startDate=2021-01-01 --endDate=2021-01-31 --deviceLimit=-1`
*
* Telemetry device processing flow:
* 1. Fetch the telemetry count from the vendor API.
Expand Down Expand Up @@ -61,10 +64,10 @@
let lotekDevices = await lotekService.fetchDevicesFromLotek(); // Fetch the lotek account devices
let vectronicDevices = await vectronicService.getDeviceCredentials(); // Fetch the vectronic account devices

// Optional device limit for testing
if (args._test_maxDevices) {
lotekDevices = lotekDevices.slice(0, args._test_maxDevices);
vectronicDevices = vectronicDevices.slice(0, args._test_maxDevices);
// Limit the number of devices to process (useful when limiting PR cronjobs)
if (args.deviceLimit !== PROCESS_ALL_DEVICES) {
lotekDevices = lotekDevices.slice(0, args.deviceLimit);
vectronicDevices = vectronicDevices.slice(0, args.deviceLimit);

Check warning on line 70 in api/src/cronjobs/telemetry/cronjob.ts

View check run for this annotation

Codecov / codecov/patch

api/src/cronjobs/telemetry/cronjob.ts#L69-L70

Added lines #L69 - L70 were not covered by tests
}

// 3. GENERATE QUEUEABLE TASKS - Create tasks for each device
Expand All @@ -74,8 +77,19 @@

// 4. PROCESS TELEMETRY - Fetch telemetry from the vendor API and insert it into the SIMS database
defaultLog.info({ message: 'Processing telemetry.' });
const lotekResults = await lotekService.processTelemetry(lotekTasks, args);
const vectronicResults = await vectronicService.processTelemetry(vectronicTasks, args);
const lotekResults = await lotekService.processTelemetry(lotekTasks, {
concurrently: args.concurrently,
batchSize: args.batchSize,
startDate: args.startDate,
endDate: args.endDate
});

const vectronicResults = await vectronicService.processTelemetry(vectronicTasks, {
concurrently: args.concurrently,
batchSize: args.batchSize,
startDate: args.startDate,
endDate: args.endDate
});

// 5. PARSE RESULTS - Parse the telemetry processing results for logging
const parsedLotek = parseResults('Lotek', lotekResults);
Expand Down Expand Up @@ -145,12 +159,12 @@
concurrently: { type: 'string', default: '100' },
// The number of items to insert in a single batch
batchSize: { type: 'string', default: '1000' },
// The maximum number of devices to process
deviceLimit: { type: 'string', default: PROCESS_ALL_DEVICES.toString() },
// The start date for fetching telemetry data
startDate: { type: 'string' },
// The end date for fetching telemetry data
endDate: { type: 'string' },
// The maximum number of devices to process (for testing)
_test_maxDevices: { type: 'string' }
endDate: { type: 'string' }
},
allowPositionals: true
});
Expand All @@ -159,9 +173,9 @@
.object({
concurrently: z.coerce.number(),
batchSize: z.coerce.number(),
deviceLimit: z.coerce.number(),
startDate: z.string().optional(),
endDate: z.string().optional(),
_test_maxDevices: z.coerce.number().optional()
endDate: z.string().optional()
})
.strict()
.parse(parsedArgs.values);
Expand Down
6 changes: 1 addition & 5 deletions api/src/utils/logger.ts
Original file line number Diff line number Diff line change
Expand Up @@ -104,18 +104,14 @@ export const _getLoggerParameters = (logLabel: string, params: CustomLoggerParam
* @return {*} {string[]}
*/
const _getLoggerTransportTypes = (): string[] => {
const transportTypes = [];
const transportTypes = ['console'];

// Do not output logs to file when running unit tests
// Note: Both lifecycle events are needed to prevent log files ie: `npm run test` or `npm run test-watch`
if (process.env.npm_lifecycle_event !== 'test' && process.env.npm_lifecycle_event !== 'test-watch') {
transportTypes.push('file');
}

if (process.env.NODE_ENV !== 'production') {
transportTypes.push('console');
}

return transportTypes;
};

Expand Down
2 changes: 1 addition & 1 deletion app/.pipeline/config.js
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ const phases = {
maxUploadFileSize,
nodeEnv: 'production',
sso: config.sso.prod,
featureFlags: 'APP_FF_SUBMIT_BIOHUB,APP_FF_DISABLE_BAD_DEPLOYMENT_DELETE',
featureFlags: 'APP_FF_SUBMIT_BIOHUB',
cpuRequest: '50m',
cpuLimit: '1000m',
memoryRequest: '100Mi',
Expand Down
Loading