from base import BotPlugin
- from tweepy import OAuthHandler, API
+ from tweepy import OAuthHandler, API as TWITTER_API
from settings import TWITTER as T
from response import random_response
- from regex import twt_regex, yt_regex, vimeo_regex, web_regex
from urllib2 import urlopen
- import json
import re
+ import json
import requests
import lxml.html
+ import regex

- oauth = OAuthHandler(T['consumer_key'], T['consumer_secret'])
- oauth.set_access_token(T['access_token_key'], T['access_token_secret'])
- twt = API(oauth)
+ TWITTER_OAUTH = OAuthHandler(T['consumer_key'], T['consumer_secret'])
+ TWITTER_OAUTH.set_access_token(T['access_token_key'], T['access_token_secret'])
+ TWITTER = TWITTER_API(TWITTER_OAUTH)

# These will be filtered out in _read_websites
- __all_non_web__ = [twt_regex, yt_regex, vimeo_regex]
+ __all_non_web__ = [regex.TWITTER, regex.YOUTUBE, regex.VIMEO]

VIDEO_RESPONSES = [
    "That video is titled '%(title)s'. "
@@ -41,25 +41,23 @@

class ReadLinks(BotPlugin):

-     def _get_name_text(self, status_id):
-         status = twt.get_status(status_id)
+     def _get_tweet_info(self, status_id):
+         status = TWITTER.get_status(status_id)
        name = status.user.screen_name
        text = status.text.replace('\n', ' ')
        return name, text

    def _read_twitter(self, channel, msg):
-         res = twt_regex.search(msg)
-         if not res:
+         twt_res = regex.TWITTER.search(msg)
+         if not twt_res:
            return
        try:
-             (name, text) = self._get_name_text(str(res.groups()[0]))
+             (name, text) = self._get_tweet_info(twt_res.group('id'))
            response = unicode("@" + name + " on Twitter says: " + text)
            response = response.encode('utf8')
            self.bot.say(response, channel)
-         except Exception as e:
-             print e
-             self.bot.log_error('Could not get tweet from: "'
-                                + msg + '" the exception was: ' + str(e))
+         except:
+             self.bot.log_error('Could not get tweet from: "' + msg + '"')
            self.bot.say('Sorry, I wasn\'t able to read the last tweet :(',
                         channel)

@@ -80,17 +78,16 @@ def _get_vimeo_info(self, video_id):
        }

    def _read_vimeo(self, channel, msg):
-         res = vimeo_regex.search(msg)
-         if not res:
+         vimeo_res = regex.VIMEO.search(msg)
+         if not vimeo_res:
            return
        try:
-             video_id = str(res.groups()[0])
-             video_info = self._get_vimeo_info(video_id)
+             video_info = self._get_vimeo_info(vimeo_res.group('id'))
            self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
                         channel)
-         except Exception as e:
+         except:
            self.bot.log_error('Could not get title of vimeo link from: "'
-                                + msg + '" the exception was: ' + str(e))
+                                + msg + '"')
            self.bot.say('For some reason I couldn\'t read the title of that '
                         + 'vimeo link.', channel)

@@ -113,36 +110,34 @@ def _get_youtube_info(self, video_id):
        }

    def _read_youtube(self, channel, msg):
-         res = yt_regex.search(msg)
-         if not res:
+         yt_res = regex.YOUTUBE.search(msg)
+         if not yt_res:
            return
        try:
-             video_id = str(res.groups()[0])
-             video_info = self._get_youtube_info(video_id)
+             video_info = self._get_youtube_info(yt_res.group('id'))
            self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
                         channel)
-         except Exception as e:
+         except:
            self.bot.log_error('Could not get title of youtube link from: "'
-                                + msg + '" the exception was: ' + str(e))
+                                + msg + '"')
            self.bot.say('For some reason I couldn\'t read the title of that '
                         + 'youtube link.', channel)

    def _read_websites(self, channel, msg):
-         links = web_regex.findall(msg)
+         links = regex.WEB_URL.findall(msg)
        for link in links:
-             link = link[0]
            if [r for r in __all_non_web__ if r.search(link)]:
                continue
            try:
-                 t = lxml.html.parse(urlopen(str(link)))  # nopep8 # nosec: web_regex only allows http(s)
+                 t = lxml.html.parse(urlopen(link))  # nopep8 # nosec: regex.WEB_URL only allows http(s)
                t = t.find(".//title").text
                t = t.strip().replace('\n', ' ')
                if len(re.sub("[^a-zA-Z0-9]", "", t)) >= 5:
                    self.bot.say(random_response(WEB_RESPONSES) % {'title': t},
                                 channel)
-             except Exception as e:
+             except:
                self.bot.log_error('Could not get title of webpage: "'
-                                    + msg + '" the exception was: ' + str(e))
+                                    + msg + '"')

    def handle_message(self, channel, nick, msg, line=None):
        if "PRIVMSG" in line: