forked from bahrmichael/aws-scheduler
scheduler.py
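"""Scheduler: loads pending events from DynamoDB, queues them to SQS with a delay, and publishes cron events to SNS."""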
import json
import math
import os
from datetime import datetime
from boto3.dynamodb.conditions import Key

from model import table
from sns_client import publish_sns
from sqs_client import publish_sqs


def handle(event_ids):
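    """Load the given events from DynamoDB and pass them to schedule_events.

    Events that no longer exist in the table are skipped.
    """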
print(f"Scheduling {event_ids}")
events = []
for event_id in event_ids:
items = table.query(
KeyConditionExpression=Key('pk').eq(event_id['pk']) & Key('sk').eq(event_id['sk'])
).get('Items', [])
if len(items) == 0:
print('Event %s doesn\'t exist anymore' % event_id)
continue
events.append(items[0])
schedule_events(events)
def schedule_cron_events(events):
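    """Publish due cron events to their SNS targets.

    Events that fail to publish are forwarded to their failure topic, if one is set.
    """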
    failed_cron_events = []
    for item in events:
        try:
            publish_sns(item['target'], item['payload'])
            now = datetime.utcnow()
            print('cron event.emitted %s' % json.dumps({
                'application': item['application'],
                'eventIdentifier': item['eventIdentifier'],
                'timestamp': str(now),
                'scheduled': str(item['cronExpression'])
            }))
        except Exception as e:
            print(f"Failed to emit cron event {item['pk']}: {str(e)}")
            failed_cron_events.append(item)

    for event in failed_cron_events:
        try:
            # only events that declare a failure topic can be reported
            if 'failure_topic' in event:
                payload = {
                    'error': 'ERROR',
                    'event': event['payload']
                }
                publish_sns(event['failure_topic'], json.dumps(payload))
        except Exception as e:
            print(f"Failed to emit event {event['eventIdentifier']} to failure topic: {str(e)}")


def schedule_events(events):
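    """Queue events into SQS with a per-message delay matching their due date.

    Events that cannot be queued are reported to their failure topic, if one is set.
    """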
    successful_ids = []
    failed_ids = []
    to_be_scheduled = []
    events_by_id = {}

    for event in events:
        events_by_id[event['sk']] = event
        # check for cronjob using croniter
        delta = datetime.fromisoformat(event['date']) - datetime.utcnow()
        delay = delta.total_seconds()
        rounded_delay = math.ceil(delay)

        # schedule the event a second earlier to help with delays in sqs/lambda cold start
        # the emitter will wait accordingly
        rounded_delay -= 1
        if rounded_delay < 0:
            rounded_delay = 0

        print(f'ID {event["sk"]} is supposed to emit in {rounded_delay}s which is {delay - rounded_delay}s before target.')

        message = {
            'payload': event['payload'],
            'target': event['target'],
            'sk': event['sk'],
            'pk': int(event['pk']),
            'date': event['date']
        }
        if 'failure_topic' in event:
            message['failure_topic'] = event['failure_topic']

        sqs_message = {
            'Id': event['sk'],
            'MessageBody': json.dumps(message),
            'DelaySeconds': rounded_delay
        }
        to_be_scheduled.append(sqs_message)

        # SQS batch sends take at most 10 entries, so flush every full batch
        if len(to_be_scheduled) == 10:
            successes, failures = publish_sqs(os.environ.get('QUEUE_URL'), to_be_scheduled)
            failed_ids.extend(failures)
            successful_ids.extend(successes)
            to_be_scheduled = []

    # flush the remaining partial batch
    if to_be_scheduled:
        successes, failures = publish_sqs(os.environ.get('QUEUE_URL'), to_be_scheduled)
        failed_ids.extend(failures)
        successful_ids.extend(successes)

    print(f'Success: {len(successful_ids)}, Failed: {len(failed_ids)}')

    if failed_ids:
        print(f"Failed to schedule the following events: {failed_ids}")
    for failed_id in failed_ids:
        item = events_by_id[failed_id]
        # todo: instead of publishing the error we should reschedule it automatically
        # can happen if sqs does not respond
        if 'failure_topic' in item:
            payload = {
                'error': 'ERROR',
                'event': item['payload']
            }
            publish_sns(item['failure_topic'], json.dumps(payload))