-
Notifications
You must be signed in to change notification settings - Fork 0
/
sf.py
149 lines (121 loc) · 3.94 KB
/
sf.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
#!/usr/bin/python
"""
Stuff to export data from sf.net
"""
import datetime
import requests
import StringIO
def parse_ts(ts):
    """
    Parse a SourceForge API timestamp into a datetime.

    Expected format example: 2013-08-10 23:36:31.812000
    """
    return datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S.%f")
def get_list(group):
    """
    Get a list of tickets for that group.

    Returns the decoded JSON listing from the SourceForge REST API.
    """
    # FIXME: need to continue the query somehow
    base = 'https://sourceforge.net/rest/p/pywikipediabot/'
    response = requests.get(base + group)
    return response.json()
def iter_tickets(group):
    """
    Yield a Ticket object for every ticket listed in the given group.
    """
    listing = get_list(group)
    for entry in listing['tickets']:
        yield Ticket(group, entry['ticket_num'])
class Ticket:
    """
    One ticket on the pywikipediabot SourceForge tracker.

    The ticket JSON is fetched lazily on first access and cached
    on the instance as ``_json``.
    """
    def __init__(self, group, number):
        self.group = group   # tracker group, e.g. 'bugs'
        self.id = number     # ticket number within that group
    def api(self):
        """
        API endpoint for this specific ticket
        """
        return 'https://sourceforge.net/rest/p/pywikipediabot/{0}/{1}'.format(self.group, self.id)
    def thread_api(self):
        """
        API endpoint for the "discussion thread"
        """
        return self.api() + '/_discuss/thread/{0}/new'.format(self.json['ticket']['discussion_thread']['_id'])
    def human_url(self):
        """
        The url for humans to use
        """
        return 'http://sourceforge.net/p/pywikipediabot/{0}/{1}/'.format(self.group, self.id)
    def get(self):
        """
        Fetch the ticket JSON from the API, caching it on the instance.
        """
        if not hasattr(self, '_json'):
            r = requests.get(self.api())
            self._json = r.json()
        return self._json
    @property
    def json(self):
        """
        JSON representation of the ticket.
        Will fetch if needed
        """
        if not hasattr(self, '_json'):
            self.get()
        return self._json
    def description(self):
        """Free-text description the ticket was opened with."""
        # BUG FIX: originally read the stray global ``t`` instead of self
        return self.json['ticket']['description']
    def summary(self):
        """One-line summary (title) of the ticket."""
        # BUG FIX: originally read the stray global ``t`` instead of self
        return self.json['ticket']['summary']
    def comments(self):
        """Yield the text of each post in the discussion thread."""
        for cmt in self.json['ticket']['discussion_thread']['posts']:
            yield cmt['text']
    def is_open(self):
        """
        Works for the most part, but is_not_closed is better I think.
        """
        return self.json['ticket']['status'].startswith('open')
    def is_not_closed(self):
        """True unless the status string starts with 'closed'."""
        return not self.json['ticket']['status'].startswith('closed')
    def add_comment(self, text):
        """Post a new comment to this ticket's discussion thread."""
        # TODO: Test this
        params = {'text': text}
        # single-arg print(...) behaves identically under Python 2 and 3
        requests.post(self.thread_api(), params)
        print('Added comment.')
    def iter_attachments(self):
        """
        Yields urls for each attachment.
        Apparently you can't get them over HTTPS, so force HTTP
        """
        for attachment in self.json['ticket']['attachments']:
            yield attachment['url'].replace('https://', 'http://')
    def fetch_attachments(self):
        """
        Actually fetches the attachments.
        returns tuple of str, StringIO.StringIO
        """
        for url in self.iter_attachments():
            r = requests.get(url)
            s = StringIO.StringIO()
            s.write(r.text)
            s.seek(0)  # reset
            yield url, s
    def export(self):
        """
        Render the ticket (header, description, comments) as one
        plain-text string suitable for archiving elsewhere.
        """
        # TODO: Is this good?
        # join() instead of repeated += so assembly stays linear
        parts = [
            'Originally from: {0}\n'.format(self.human_url()),
            'Created on: {0}\n'.format(parse_ts(self.json['ticket']['created_date'])),
            'Subject: {0}\n'.format(self.summary()),
            'Original description:\n{0}\n'.format(self.description()),
        ]
        for cmt in self.comments():
            parts.append('---------\n')
            parts.append(cmt + '\n')
        return ''.join(parts)
def testticket():
    """
    Smoke test: fetch one known ticket and dump its fields to stdout.
    """
    ticket = Ticket('bugs', 1653)
    print('Fetching {0}...'.format(ticket.human_url()))
    print('Subject: {0}'.format(ticket.summary()))
    print('Created on: {0}'.format(parse_ts(ticket.json['ticket']['created_date'])))
    print(ticket.description())
    for cmt in ticket.comments():
        print('------')
        print(cmt)
if __name__ == '__main__':
    # Walk the 'bugs' tracker and export every CLOSED ticket to stdout.
    for t in iter_tickets('bugs'):
        if t.is_not_closed():
            continue
        print(t.human_url())
        print('----')
        print(t.export())
        print('----')