Clean logger lines #29

Open · wants to merge 1 commit into master
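
This PR converts eager %-interpolation inside logger calls to the logging module's lazy argument passing. Both forms print the same text, but the lazy form stores the template and its arguments on the LogRecord and only interpolates them (via record.getMessage()) when a handler actually emits the record, so messages filtered out by level cost nothing to format. A template/argument mismatch is also trapped by logging's internal error handling instead of raising at the call site. A minimal sketch of the pattern being applied here (the logger name and task value are illustrative):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('ochopod')  # illustrative name

task = 'on'  # illustrative value

# before: the message is built by % *before* logging sees it,
# even when DEBUG records would be dropped anyway
logger.debug('http in -> /control/%s' % task)

# after: logging keeps the template and args and interpolates
# only if a handler actually emits the record
logger.debug('http in -> /control/%s', task)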
ochopod/bindings/ec2/kubernetes.py: 12 changes (6 additions & 6 deletions)
@@ -82,7 +82,7 @@ def boot(self, lifecycle, model=Reactive, local=0):
# - grab our environment variables
# - isolate the ones prefixed with ochopod_
#
logger.debug('environment ->\n%s' % '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
logger.debug('environment ->\n%s', '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
hints = {k[8:]: v for k, v in env.items() if k.startswith('ochopod_')}
if local or hints['local'] == 'true':

@@ -193,7 +193,7 @@ def _spin():
# the coordinator (especially the pod index which is derived from zookeeper)
#
latch = ThreadingFuture()
logger.info('starting %s.%s (kubernetes/ec2) @ %s' % (hints['namespace'], hints['cluster'], hints['node']))
logger.info('starting %s.%s (kubernetes/ec2) @ %s', hints['namespace'], hints['cluster'], hints['node'])
breadcrumbs = deepcopy(hints)
env.update({'ochopod': json.dumps(hints)})
executor = lifecycle.start(env, latch, hints)
@@ -264,12 +264,12 @@ def _control(task, timeout='60'):
try:

ts = time.time()
logger.debug('http in -> /control/%s' % task)
logger.debug('http in -> /control/%s', task)
latch = ThreadingFuture()
executor.tell({'request': task, 'latch': latch, 'data': request.data})
js, code = latch.get(timeout=int(timeout))
ms = time.time() - ts
logger.debug('http out -> HTTP %s (%d ms)' % (code, ms))
logger.debug('http out -> HTTP %s (%d ms)', code, ms)
return json.dumps(js), code

except Timeout:
@@ -341,6 +341,6 @@ def run(self):

except Exception as failure:

logger.fatal('unexpected condition -> %s' % diagnostic(failure))
logger.fatal('unexpected condition -> %s', diagnostic(failure))

exit(1)
exit(1)
ochopod/bindings/ec2/marathon.py: 10 changes (5 additions & 5 deletions)
@@ -85,7 +85,7 @@ def boot(self, lifecycle, model=Reactive, local=0):
# - extract the mesos PORT_* bindings and construct a small remapping dict
#
ports = {}
logger.debug('environment ->\n%s' % '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
logger.debug('environment ->\n%s', '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
for key, val in env.items():
if key.startswith('PORT_'):
ports[key[5:]] = int(val)
@@ -193,7 +193,7 @@ def _dcos_deployment():
# the coordinator (especially the pod index which is derived from zookeeper)
#
latch = ThreadingFuture()
logger.info('starting %s.%s (marathon/ec2) @ %s' % (hints['namespace'], hints['cluster'], hints['node']))
logger.info('starting %s.%s (marathon/ec2) @ %s', hints['namespace'], hints['cluster'], hints['node'])
breadcrumbs = deepcopy(hints)
hints['metrics'] = {}
env.update({'ochopod': json.dumps(hints)})
@@ -266,12 +266,12 @@ def _control(task, timeout='60'):
try:

ts = time.time()
logger.debug('http in -> /control/%s' % task)
logger.debug('http in -> /control/%s', task)
latch = ThreadingFuture()
executor.tell({'request': task, 'latch': latch, 'data': request.data})
js, code = latch.get(timeout=int(timeout))
ms = time.time() - ts
logger.debug('http out -> HTTP %s (%d ms)' % (code, ms))
logger.debug('http out -> HTTP %s (%d ms)', code, ms)
return json.dumps(js), code

except Timeout:
@@ -343,4 +343,4 @@ def run(self):

except Exception as failure:

logger.fatal('unexpected condition -> %s' % diagnostic(failure))
logger.fatal('unexpected condition -> %s', diagnostic(failure))
ochopod/core/core.py: 18 changes (9 additions & 9 deletions)
@@ -84,14 +84,14 @@ def reset(self, data):
self.connected = 0
self.force_reset = 0
self.hints['state'] = 'follower'
logger.warning('%s : actor reset (%s)' % (self.path, data.cause))
logger.warning('%s : actor reset (%s)', self.path, data.cause)
if hasattr(data, 'zk'):

#
# - gracefully shut our client down
#
data.zk.stop()
logger.debug('%s : zk client stopped, releasing resources' % self.path)
logger.debug('%s : zk client stopped, releasing resources', self.path)
data.zk.close()

if self.terminate:
@@ -105,7 +105,7 @@ def initial(self, data):
# - setup a new kazoo client
#
cnx_string = ','.join(self.brokers)
logger.debug('%s : connecting @ %s' % (self.path, cnx_string))
logger.debug('%s : connecting @ %s', self.path, cnx_string)
data.zk = KazooClient(hosts=cnx_string, timeout=5.0, read_only=0, randomize_hosts=1)
data.zk.add_listener(self.feedback)
data.zk.start()
@@ -155,10 +155,10 @@ def wait_for_cnx(self, data):
# and /snapshot has not been phased out yet .. this is not an issue, simply pause a bit
# to re-attempt later
#
logger.debug('%s : pod %s is already there (probably a zk reconnect)' % (self.path, self.id))
logger.debug('%s : pod %s is already there (probably a zk reconnect)', self.path, self.id)
return 'wait_for_cnx', data, 5.0 * SAMPLING

logger.debug('%s : registered as %s (#%d)' % (self.path, self.id, self.seq))
logger.debug('%s : registered as %s (#%d)', self.path, self.id, self.seq)
data.connected_at = time.time()
return 'spin', data, 0

@@ -179,9 +179,9 @@ def specialized(self, msg):
#
state = msg['state']
current = 'connected' if self.connected else 'disconnected'
logger.debug('%s : zk state change -> "%s" (%s)' % (self.path, str(state), current))
logger.debug('%s : zk state change -> "%s" (%s)', self.path, str(state), current)
if self.connected and state != KazooState.CONNECTED:
logger.warning('%s : lost connection (%s) / forcing a reset' % (self.path, str(state)))
logger.warning('%s : lost connection (%s) / forcing a reset', self.path, str(state))
self.force_reset = 1
self.connected = 0

@@ -278,7 +278,7 @@ def start_controller(self, data):
# - start the controller actor
#
data.latch = ThreadingFuture()
logger.debug('%s : lock acquired @ %s, now leading' % (self.path, self.prefix))
logger.debug('%s : lock acquired @ %s, now leading', self.path, self.prefix)
data.controller = self.model.start(data.zk, self.id, self.hints, self.scope, self.tag, self.port, data.latch)

return 'lock', data, 0
@@ -304,4 +304,4 @@ def lock(self, data):
except Timeout:
pass

return 'lock', data, 0
return 'lock', data, 0
ochopod/core/fsm.py: 8 changes (4 additions & 4 deletions)
@@ -225,7 +225,7 @@ def reset(self, data):
# - pass the exception that caused the reset back to the latch or re-package it as an Aborted
# and seed its log using the failure diagnostic otherwise
#
logger.debug('%s : reset (%s)' % (self.path, data.cause))
logger.debug('%s : reset (%s)', self.path, data.cause)
self.exitcode(data.cause if isinstance(data.cause, Aborted) else Aborted(data.diagnostic))

def initial(self, data):
@@ -296,7 +296,7 @@ def on_receive(self, msg):
return self.specialized(msg)

except Exception as failure:
logger.debug('%s : exception trapped while handling specialized messages (%s)' % (self.path, str(failure)))
logger.debug('%s : exception trapped while handling specialized messages (%s)', self.path, str(failure))
pass

else:
@@ -364,7 +364,7 @@ def on_receive(self, msg):
#
if cmd['state'] == 'reset':

logger.debug('%s : exception trapped while resetting (%s)' % (self.path, str(failure)))
logger.debug('%s : exception trapped while resetting (%s)', self.path, str(failure))
_kill(self.actor_ref)
self.dying = 1

@@ -374,7 +374,7 @@ def on_receive(self, msg):
data.cause = failure
data.previous = cmd['state']
data.diagnostic = diagnostic(failure)
logger.debug('%s : exception trapped -> (%s)' % (self.path, data.diagnostic))
logger.debug('%s : exception trapped -> (%s)', self.path, data.diagnostic)
self.actor_ref.tell({'fsm': {'state': 'reset', 'data': data}})


ochopod/models/piped.py: 46 changes (23 additions & 23 deletions)
@@ -132,7 +132,7 @@ def reset(self, data):
# - this happens when we need to first terminate the process
#
try:
logger.info('%s : tearing down process %s' % (self.path, data.sub.pid))
logger.info('%s : tearing down process %s', self.path, data.sub.pid)
self.hints['process'] = 'terminating'
self.tear_down(data.sub)

@@ -170,23 +170,23 @@ def wait_for_termination(self, data):
# - if the soft switch is on bypass the SIGKILL completely
# - this is a special case to handle peculiar scenarios
#
logger.info('%s: bypassing the forced termination (leaking pid %s)...' % (self.path, data.sub.pid))
logger.info('%s: bypassing the forced termination (leaking pid %s)...', self.path, data.sub.pid)

else:

#
# - the process is stuck, force a SIGKILL
# - silently trap any failure
#
logger.info('%s : pid %s not terminating, killing it' % (self.path, data.sub.pid))
logger.info('%s : pid %s not terminating, killing it', self.path, data.sub.pid)
try:
data.sub.kill()

except Exception as _:
pass

else:
logger.debug('%s : pid %s terminated in %d seconds' % (self.path, data.sub.pid, int(elapsed)))
logger.debug('%s : pid %s terminated in %d seconds', self.path, data.sub.pid, int(elapsed))

data.sub = None
self.hints['process'] = 'stopped'
@@ -255,11 +255,11 @@ def spin(self, data):
#
data.checks -= 1
data.failed = 0
logger.warning('%s : sanity check (%d/%d) failed -> %s' %
(self.path, self.checks - data.checks, self.checks, diagnostic(failure)))
logger.warning('%s : sanity check (%d/%d) failed -> %s',
self.path, self.checks - data.checks, self.checks, diagnostic(failure))

if not data.checks:
logger.warning('%s : turning pod off' % self.path)
logger.warning('%s : turning pod off', self.path)
data.checks = self.checks
self._request(['off'])

@@ -272,7 +272,7 @@ def spin(self, data):
# - a successful exit code (0) will automatically force a shutdown
# - this is a convenient way for pods go down automatically once their task is done
#
logger.error('%s : pid %s exited, shutting down' % (self.path, data.sub.pid))
logger.error('%s : pid %s exited, shutting down', self.path, data.sub.pid)
self._request(['kill'])

else:
Expand All @@ -283,7 +283,7 @@ def spin(self, data):
# - restart it gracefully
#
data.failed += 1
logger.error('%s : pid %s died (code %d), re-running' % (self.path, data.sub.pid, code))
logger.error('%s : pid %s died (code %d), re-running', self.path, data.sub.pid, code)
self._request(['off', 'on'])

else:
Expand Down Expand Up @@ -313,7 +313,7 @@ def on(self, data):
# - this is the code-path used for instance up a leader request when strict is false
#
reply = {}, 200
logger.debug('%s : skipping /control/on request' % self.path)
logger.debug('%s : skipping /control/on request', self.path)
data.latch.set(reply)

else:
Expand All @@ -329,7 +329,7 @@ def on(self, data):
# - if this is the 1st time the pod is running invoke the initialize() callback
# - this is typically used to run once-only stuff such as attaching storage volumes, etc.
#
logger.info('%s : initializing pod' % self.path)
logger.info('%s : initializing pod', self.path)
self.initialize()
self.initialized = 1

@@ -342,7 +342,7 @@ def on(self, data):
# - make sure the optional overrides set by configure() are strings
#
cluster = _Cluster(data.js)
logger.info('%s : configuring pod %d/%d' % (self.path, 1 + cluster.index, cluster.size))
logger.info('%s : configuring pod %d/%d', self.path, 1 + cluster.index, cluster.size)
data.command, overrides = self.configure(cluster)
data.env = {key: str(value) for key, value in overrides.items()}
self.last = data.js
@@ -390,7 +390,7 @@ def _pipe(process):
if line == '' and code is not None:
break

logger.info('pid %s : %s' % (process.pid, line))
logger.info('pid %s : %s', process.pid, line)

out = Thread(target=_pipe, args=(data.sub,))
out.daemon = True
@@ -409,10 +409,10 @@ def _pipe(process):

data.pids += 1
self.hints['process'] = 'running'
logger.info('%s : popen() #%d -> started <%s> as pid %s' % (self.path, data.pids, data.command, data.sub.pid))
logger.info('%s : popen() #%d -> started <%s> as pid %s', self.path, data.pids, data.command, data.sub.pid)
if data.env:
unrolled = '\n'.join(['\t%s -> %s' % (k, v) for k, v in data.env.items()])
logger.debug('%s : extra environment for pid %s ->\n%s' % (self.path, data.sub.pid, unrolled))
logger.debug('%s : extra environment for pid %s ->\n%s', self.path, data.sub.pid, unrolled)

reply = {}, 200
data.latch.set(reply)
@@ -424,7 +424,7 @@ def _pipe(process):
# - the pod will shutdown automatically as well
#
reply = {}, 406
logger.warning('%s : failed to configure -> %s, shutting down' % (self.path, diagnostic(failure)))
logger.warning('%s : failed to configure -> %s, shutting down', self.path, diagnostic(failure))
self._request(['kill'])
data.latch.set(reply)

@@ -449,7 +449,7 @@ def check(self, data):
# - any failure trapped during the configuration -> HTTP 406
#
reply = {}, 406
logger.warning('%s : failed to run the pre-check -> %s' % (self.path, diagnostic(failure)))
logger.warning('%s : failed to run the pre-check -> %s', self.path, diagnostic(failure))
data.latch.set(reply)

self.commands.popleft()
@@ -482,7 +482,7 @@ def kill(self, data):
#
# - invoke the optional finalize() callback
#
logger.info('%s : finalizing pod' % self.path)
logger.info('%s : finalizing pod', self.path)
self.finalize()

except Exception as failure:
Expand All @@ -491,7 +491,7 @@ def kill(self, data):
# - log something if for some reason finalize() failed as we can't really recover
# - don't bother responding with a 406
#
logger.warning('%s : failed to finalize -> %s' % (self.path, diagnostic(failure)))
logger.warning('%s : failed to finalize -> %s', self.path, diagnostic(failure))

#
# - in any case request a termination and tag the pod as 'dead'
@@ -506,7 +506,7 @@ def signal(self, data):
def signal(self, data):

try:
logger.debug('%s : user signal received' % self.path)
logger.debug('%s : user signal received', self.path)
js = self.signaled(data.js, process=data.sub)
reply = js if js else {}, 200

@@ -516,7 +516,7 @@ def signal(self, data):
# - abort on a 500 upon any failure
#
reply = {}, 500
logger.warning('%s : failed to signal -> %s' % (self.path, diagnostic(failure)))
logger.warning('%s : failed to signal -> %s', self.path, diagnostic(failure))

data.latch.set(reply)
self.commands.popleft()
@@ -527,7 +527,7 @@ def ok(self, data):
try:

assert data.js, 'control/ok received out of context (leader bug ?)'
logger.debug('%s : cluster has been formed, invoking configured()' % self.path)
logger.debug('%s : cluster has been formed, invoking configured()', self.path)
cluster = _Cluster(data.js)
self.configured(cluster)
reply = {}, 200
@@ -538,7 +538,7 @@ def ok(self, data):
# - abort on a 500 upon any failure
#
reply = {}, 500
logger.warning('%s : failed to signal -> %s' % (self.path, diagnostic(failure)))
logger.warning('%s : failed to signal -> %s', self.path, diagnostic(failure))

data.latch.set(reply)
self.commands.popleft()
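
The same mechanical change repeats throughout piped.py above. To see what the lazy form buys, here is a self-contained sketch (not part of the diff; the Expensive class and logger name are hypothetical) showing that an argument whose str() is costly is never rendered when the record's level is disabled:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('demo')  # hypothetical logger for this sketch

class Expensive(object):
    calls = 0

    def __str__(self):
        Expensive.calls += 1  # count how often formatting actually runs
        return 'rendered'

probe = Expensive()

logger.debug('value -> %s' % probe)  # eager: __str__ runs even though DEBUG is off
logger.debug('value -> %s', probe)   # lazy: the record is dropped before formatting

print(Expensive.calls)  # prints 1 -> only the eager call paid for the formatting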