Skip to content
This repository was archived by the owner on Oct 4, 2023. It is now read-only.

Commit 01dc268

Browse files
committed
Error handling, tests, lint, beat the patriarchy slightly
1 parent f371b11 commit 01dc268

13 files changed

+238
-109
lines changed

requirements.txt

+9-9
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
requests==2.4.3
2-
Pillow==2.6.1
3-
tweepy==3.1.0
1+
requests==2.9.1
2+
Pillow==3.1.1
3+
tweepy==3.5.0
44
gdata==2.0.18
5-
pytest==2.6.4
6-
mock==1.0.1
7-
lxml==3.4.1
8-
coverage==3.7.1
9-
coveralls==0.5
10-
Pafy==0.3.74
5+
pytest==2.8.7
6+
mock==1.3.0
7+
lxml==3.5.0
8+
coverage==4.0.3
9+
coveralls==1.1
10+
Pafy==0.4.3

run_analysis

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#!/bin/sh
22
export PYTHONPATH=src/:$PYTHONPATH
33
. venv/bin/activate
4-
54
pip install -r requirements-analysis.txt -U
6-
pep8 --ignore=E501,W503 src tests
5+
6+
pep8 --ignore=W503 src tests
77
pyflakes src tests
88
pep257 --ignore=D100,D101,D102,D103,D104,D200,D205,D211,D400 src tests
99
bandit -r src

run_tests

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
#!/bin/sh
22
export PYTHONPATH=src/:$PYTHONPATH
33
. venv/bin/activate
4-
coverage run --source src -m py.test -s tests
4+
coverage run --source src -m py.test -s tests && coverage report

setup_venv

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
#!/bin/sh
22
virtualenv venv
33
. venv/bin/activate
4-
pip install -r requirements.txt
4+
pip install -r requirements.txt -U

src/handler.py

+12-4
Original file line numberDiff line numberDiff line change
@@ -58,12 +58,20 @@ def handle_connect(self):
5858
def collect_incoming_data(self, data):
5959
self.buffer += data
6060

61-
def log_error(self, error):
61+
def log_error(self, text):
6262
with open("error.log", "a") as f:
6363
f.write(str(datetime.now()) + '\n')
64-
f.write('ERROR: ' + str(error) + '\n\n')
65-
if self.debug:
66-
print error
64+
f.write('Error: ' + text + '\n')
65+
if self.debug:
66+
print text
67+
68+
from traceback import format_exception
69+
from sys import exc_info
70+
ex_type, ex_val, ex_tb = exc_info()
71+
ex_text = ''.join(format_exception(ex_type, ex_val, ex_tb, 10))
72+
f.write(ex_text + '\n')
73+
if self.debug:
74+
print ex_text
6775

6876
def found_terminator(self):
6977
line = self.buffer

src/plugins/nsfw_image_detector.py

+12-13
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@
88
* jpeg decoder for PIL (libjpeg-dev package on Ubuntu)
99
"""
1010

11-
import re
1211
import os
1312
import uuid
1413
import tempfile
14+
import regex
1515
from os.path import join as pjoin
1616

1717
import requests
@@ -33,11 +33,6 @@
3333

3434
SKIN_PERCENTAGE_THRESHOLD = 30
3535

36-
imgur_regex = re.compile(
37-
"(?:https?://)(?:www[.])?(?:imgur.com/)"
38-
+ "(?:(?:gallery/)|(?:r/[a-z]+/))?([A-Za-z0-9]+)"
39-
)
40-
4136

4237
class NSFWImageDetectorPlugin(BotPlugin):
4338

@@ -50,7 +45,7 @@ def __init__(self, bot):
5045
self._images_dir = tempfile.mkdtemp(suffix='nsfw-images')
5146

5247
def handle_message(self, channel, nick, msg, line=None):
53-
urls = re.findall(r'(https?://[^\s]+)', msg)
48+
urls = regex.WEB_URL.findall(msg)
5449

5550
if not urls:
5651
return
@@ -102,7 +97,8 @@ def _get_skin_ratio_percentage(self, file_path):
10297
try:
10398
im = Image.open(file_path)
10499
except:
105-
self.bot.log_error('Could not open NSFW image: ' + file_path)
100+
self.bot.log_error('Could not open NSFW image: "'
101+
+ file_path + '"')
106102
return 0.0
107103

108104
im = im.convert('RGB')
@@ -132,9 +128,10 @@ def _get_image_urls(self, urls):
132128

133129
image_urls = []
134130
for url in urls:
135-
imgur_id = imgur_regex.search(url)
136-
if imgur_id:
137-
url = "https://i.imgur.com/" + imgur_id.group(1) + ".jpg"
131+
# Rewrite imgur urls
132+
imgur_res = regex.IMGUR.search(url)
133+
if imgur_res:
134+
url = "https://i.imgur.com/" + imgur_res.group('id') + ".jpg"
138135

139136
if self._is_image_url(url=url):
140137
image_urls.append(url)
@@ -154,6 +151,8 @@ def _download_image(self, url):
154151
extension = os.path.splitext(url)[1]
155152
response = requests.get(url, stream=True)
156153
except:
154+
self.bot.log_error('Failed to download NSFW image: "'
155+
+ url + '"')
157156
return
158157

159158
if not response.status_code == 200:
@@ -168,8 +167,8 @@ def _download_image(self, url):
168167
if first_chunk:
169168
first_chunk = False
170169
if not self._is_image(chunk):
171-
self.bot.log_error('NSFW image was not an image: '
172-
+ url)
170+
self.bot.log_error('NSFW image was not an image: "'
171+
+ url + '"')
173172
return
174173

175174
fp.write(chunk)

src/plugins/psywerx_history.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -12,21 +12,21 @@ def handle_message(self, channel, nick, msg, line=None):
1212
if r.startswith('REPOST'):
1313
self._handle_repost(r, channel)
1414
elif r != 'OK':
15-
self.bot.log_error("Response: " + r)
15+
self.bot.log_error("Response not OK: " + r)
1616

1717
def handle_say(self, channel, msg, line):
1818
msg = ":" + self.bot.nick + "!~" + self.bot.nick + "@6.6.6.6 " + line
1919
self.request(channel, 'irc/add', {'raw': msg})
2020

2121
def _handle_repost(self, r, channel):
22-
_, nick, repostNick, messageType, num = r.split(' ')
23-
if messageType != 'M':
22+
_, nick, repost_nick, message_type, num = r.split(' ')
23+
if message_type != 'M':
2424
return
2525

26-
response = self._pick_response(nick == repostNick, int(num) > 1)
26+
response = self._pick_response(nick == repost_nick, int(num) > 1)
2727
self.bot.say(response % {
2828
'nick': nick,
29-
'repostNick': repostNick,
29+
'repost_nick': repost_nick,
3030
'num': num
3131
}, channel)
3232

src/plugins/psywerx_karma.py

+9-4
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,17 @@ def pf(number):
3333
self.bot.say(str("** CONGRATS " + p['nick'] + " **"), channel)
3434

3535
except:
36-
from traceback import format_exc
37-
self.bot.log_error('Could not get upboats: ' + str(format_exc()))
36+
self.bot.log_error('Could not get upboats.')
3837

3938
def _karma(self, tokens, channel):
4039
if len(tokens) != 1:
41-
r = json.loads(self.request(channel, 'irc/karma_nick', {}))
40+
response = self.request(channel, 'irc/karma_nick', {})
41+
if not response:
42+
msg = "Sorry, could not get karmas."
43+
self.bot.say(msg, channel)
44+
return
4245
s = ""
43-
for p in r:
46+
for p in json.loads(response):
4447
s += str(p['nick']) + " (" + str(p['karma']) + "), "
4548
self.bot.say(s[:-2], channel)
4649
else:
@@ -49,6 +52,8 @@ def _karma(self, tokens, channel):
4952
params = {'nick': nick}
5053
response = self.request(channel, 'irc/karma_nick', params)
5154
if not response:
55+
msg = "Sorry, could not get karma for " + nick + "."
56+
self.bot.say(msg, channel)
5257
return
5358
nick = tokens[0] if tokens[0] not in users else users[tokens[0]]
5459
self.bot.say(nick + " has " + response + " karma.", channel)

src/plugins/read_links.py

+28-33
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,20 @@
11
from base import BotPlugin
2-
from tweepy import OAuthHandler, API
2+
from tweepy import OAuthHandler, API as TWITTER_API
33
from settings import TWITTER as T
44
from response import random_response
5-
from regex import twt_regex, yt_regex, vimeo_regex, web_regex
65
from urllib2 import urlopen
7-
import json
86
import re
7+
import json
98
import requests
109
import lxml.html
10+
import regex
1111

12-
oauth = OAuthHandler(T['consumer_key'], T['consumer_secret'])
13-
oauth.set_access_token(T['access_token_key'], T['access_token_secret'])
14-
twt = API(oauth)
12+
TWITTER_OAUTH = OAuthHandler(T['consumer_key'], T['consumer_secret'])
13+
TWITTER_OAUTH.set_access_token(T['access_token_key'], T['access_token_secret'])
14+
TWITTER = TWITTER_API(TWITTER_OAUTH)
1515

1616
# These will be filtered out in _read_websites
17-
__all_non_web__ = [twt_regex, yt_regex, vimeo_regex]
17+
__all_non_web__ = [regex.TWITTER, regex.YOUTUBE, regex.VIMEO]
1818

1919
VIDEO_RESPONSES = [
2020
"That video is titled '%(title)s'. "
@@ -41,25 +41,23 @@
4141

4242
class ReadLinks(BotPlugin):
4343

44-
def _get_name_text(self, status_id):
45-
status = twt.get_status(status_id)
44+
def _get_tweet_info(self, status_id):
45+
status = TWITTER.get_status(status_id)
4646
name = status.user.screen_name
4747
text = status.text.replace('\n', ' ')
4848
return name, text
4949

5050
def _read_twitter(self, channel, msg):
51-
res = twt_regex.search(msg)
52-
if not res:
51+
twt_res = regex.TWITTER.search(msg)
52+
if not twt_res:
5353
return
5454
try:
55-
(name, text) = self._get_name_text(str(res.groups()[0]))
55+
(name, text) = self._get_tweet_info(twt_res.group('id'))
5656
response = unicode("@" + name + " on Twitter says: " + text)
5757
response = response.encode('utf8')
5858
self.bot.say(response, channel)
59-
except Exception as e:
60-
print e
61-
self.bot.log_error('Could not get tweet from: "'
62-
+ msg + '" the exception was: ' + str(e))
59+
except:
60+
self.bot.log_error('Could not get tweet from: "' + msg + '"')
6361
self.bot.say('Sorry, I wasn\'t able to read the last tweet :(',
6462
channel)
6563

@@ -80,17 +78,16 @@ def _get_vimeo_info(self, video_id):
8078
}
8179

8280
def _read_vimeo(self, channel, msg):
83-
res = vimeo_regex.search(msg)
84-
if not res:
81+
vimeo_res = regex.VIMEO.search(msg)
82+
if not vimeo_res:
8583
return
8684
try:
87-
video_id = str(res.groups()[0])
88-
video_info = self._get_vimeo_info(video_id)
85+
video_info = self._get_vimeo_info(vimeo_res.group('id'))
8986
self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
9087
channel)
91-
except Exception as e:
88+
except:
9289
self.bot.log_error('Could not get title of vimeo link from: "'
93-
+ msg + '" the exception was: ' + str(e))
90+
+ msg + '"')
9491
self.bot.say('For some reason I couldn\'t read the title of that '
9592
+ 'vimeo link.', channel)
9693

@@ -113,36 +110,34 @@ def _get_youtube_info(self, video_id):
113110
}
114111

115112
def _read_youtube(self, channel, msg):
116-
res = yt_regex.search(msg)
117-
if not res:
113+
yt_res = regex.YOUTUBE.search(msg)
114+
if not yt_res:
118115
return
119116
try:
120-
video_id = str(res.groups()[0])
121-
video_info = self._get_youtube_info(video_id)
117+
video_info = self._get_youtube_info(yt_res.group('id'))
122118
self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
123119
channel)
124-
except Exception as e:
120+
except:
125121
self.bot.log_error('Could not get title of youtube link from: "'
126-
+ msg + '" the exception was: ' + str(e))
122+
+ msg + '"')
127123
self.bot.say('For some reason I couldn\'t read the title of that '
128124
+ 'youtube link.', channel)
129125

130126
def _read_websites(self, channel, msg):
131-
links = web_regex.findall(msg)
127+
links = regex.WEB_URL.findall(msg)
132128
for link in links:
133-
link = link[0]
134129
if [r for r in __all_non_web__ if r.search(link)]:
135130
continue
136131
try:
137-
t = lxml.html.parse(urlopen(str(link))) # nopep8 # nosec: web_regex only allows http(s)
132+
t = lxml.html.parse(urlopen(link)) # nopep8 # nosec: regex.WEB_URL only allows http(s)
138133
t = t.find(".//title").text
139134
t = t.strip().replace('\n', ' ')
140135
if len(re.sub("[^a-zA-Z0-9]", "", t)) >= 5:
141136
self.bot.say(random_response(WEB_RESPONSES) % {'title': t},
142137
channel)
143-
except Exception as e:
138+
except:
144139
self.bot.log_error('Could not get title of webpage: "'
145-
+ msg + '" the exception was: ' + str(e))
140+
+ msg + '"')
146141

147142
def handle_message(self, channel, nick, msg, line=None):
148143
if "PRIVMSG" in line:

src/plugins/uptime.py

+1
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ def _uptime(self, tokens, channel):
1111
server_uptime_seconds = int(float(f.readline().split()[0]))
1212
server_uptime = str(timedelta(seconds=server_uptime_seconds))
1313
except:
14+
self.bot.log_error('Could not get server uptime.')
1415
server_uptime = "unknown"
1516

1617
bot_uptime_seconds = int(time() - self.bot.uptime)

src/regex.py

+12-11
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
11
import re
22

3-
twt_regex = re.compile(
4-
"https?://(?:www\\.)?twitter\\.com/.*/status(?:es)?/([0-9]+)")
5-
yt_regex = re.compile(
6-
"https?://(?:www\\.)?(?:youtu[.]be|youtube[.]com)/"
7-
+ "(?:embed/)?(?:[^/ ]*?[?&]v=)?([A-Za-z0-9_-]{11})(?:[^A-Za-z0-9_-]|$)")
8-
vimeo_regex = re.compile(
9-
"https?://(?:www\\.)?vimeo.com/(?:videos?/)?([0-9]+)")
10-
url_regex = re.compile(
11-
r"(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s'!()\[\]{};:'\".,<>?']))") # nopep8
12-
web_regex = re.compile(
13-
r"(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s'!()\[\]{};:'\".,<>?']))") # nopep8
3+
TWITTER = re.compile(
4+
"https?://(?:www[.])?twitter[.]com/.*/status(?:es)?/(?P<id>[0-9]+)")
5+
YOUTUBE = re.compile(
6+
"https?://(?:www[.])?(?:youtu[.]be|youtube[.]com)/(?:embed/)?(?:[^/ ]*?[?&]v=)?(?P<id>[A-Za-z0-9_-]{11})(?:[^A-Za-z0-9_-]|$)") # nopep8
7+
VIMEO = re.compile(
8+
"https?://(?:www[.])?vimeo.com/(?:videos?/)?(?P<id>[0-9]+)")
9+
IMGUR = re.compile(
10+
"https?://(?:www[.])?imgur.com/(?:(?:gallery/)|(?:r/[a-z]+/))?(?P<id>[A-Za-z0-9]+)") # nopep8
11+
ANY_URL = re.compile(
12+
r"(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\))+(?:\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\)|[^\s'!()\[\]{};:'\".,<>?']))") # nopep8
13+
WEB_URL = re.compile(
14+
r"(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\))+(?:\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\)|[^\s'!()\[\]{};:'\".,<>?']))") # nopep8

0 commit comments

Comments (0)