Skip to content

Commit

Permalink
Run flake when running tests
Browse files Browse the repository at this point in the history
Also fix a typo in an error message
  • Loading branch information
sloria committed Dec 2, 2017
1 parent ef26d64 commit 1113403
Show file tree
Hide file tree
Showing 12 changed files with 43 additions and 16 deletions.
3 changes: 1 addition & 2 deletions dev-requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
nose>=1.3.0
tox>=2.6.0
wheel
twine
invoke>=0.15.0
mock
flake8==3.5.0
9 changes: 8 additions & 1 deletion run_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,11 @@
python run_tests.py no-internet
'''
from __future__ import unicode_literals
import nose
import subprocess
import sys

import nose

from textblob.compat import PY2

PY26 = PY2 and int(sys.version_info[1]) < 7
Expand All @@ -22,6 +25,9 @@

def main():
    """Lint the package with flake8, then run the nose test suite.

    Exits with status 1 as soon as flake8 reports problems; otherwise
    exits with 0 when the tests pass and 1 when they fail.
    """
    argv = get_argv()
    # Run the linter first; any non-zero return code aborts the test run.
    if subprocess.call(['flake8', 'textblob']):
        sys.exit(1)
    # nose.run returns True when the whole suite passed.
    sys.exit(0 if nose.run(argv=argv) else 1)

Expand Down Expand Up @@ -59,5 +65,6 @@ def get_argv():
args.extend(["-A", attr_expression])
return args


if __name__ == '__main__':
main()
16 changes: 15 additions & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,19 @@
universal = 1

[flake8]
ignore = E501,E127,E128,E265,E302
ignore = E501,E127,E128,E265,E302,E266
max-line-length = 90
exclude =
.git,
.ropeproject,
.tox,
docs,
.git,
build,
env,
venv,
# Exclude vendorized code
textblob/en,
textblob/unicodecsv,
textblob/_text.py,
textblob/compat.py
1 change: 1 addition & 0 deletions textblob/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ def itokenize(self, text, *args, **kwargs):

##### SENTIMENT ANALYZERS ####


DISCRETE = 'ds'
CONTINUOUS = 'co'

Expand Down
2 changes: 1 addition & 1 deletion textblob/blob.py
Original file line number Diff line number Diff line change
Expand Up @@ -433,7 +433,7 @@ def sentiment_assessments(self):
:rtype: namedtuple of the form ``Sentiment(polarity, subjectivity,
assessments)``
"""
return self.analyzer.analyze(self.raw,keep_assessments=True)
return self.analyzer.analyze(self.raw, keep_assessments=True)

@cached_property
def polarity(self):
Expand Down
6 changes: 3 additions & 3 deletions textblob/classifiers.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,8 @@ def basic_extractor(document, train_set):
try:
assert(isinstance(el_zero[0], basestring))
word_features = _get_words_from_dataset(chain([el_zero], train_set))
except:
raise ValueError('train_set is proabably malformed.')
except Exception:
raise ValueError('train_set is probably malformed.')

tokens = _get_document_tokens(document)
features = dict(((u'contains({0})'.format(word), (word in tokens))
Expand Down Expand Up @@ -136,7 +136,7 @@ def __init__(self, train_set, feature_extractor=basic_extractor, format=None, **
self.train_set = self._read_data(train_set, format)
else: # train_set is a list of tuples
self.train_set = train_set
self._word_set = _get_words_from_dataset(self.train_set) #Keep a hidden set of unique words.
self._word_set = _get_words_from_dataset(self.train_set) # Keep a hidden set of unique words.
self.train_features = None

def _read_data(self, dataset, format=None):
Expand Down
1 change: 1 addition & 0 deletions textblob/download_corpora.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,5 +46,6 @@ def main():
download_all()
print("Finished.")


if __name__ == '__main__':
main()
14 changes: 8 additions & 6 deletions textblob/en/sentiments.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,23 +23,25 @@ class PatternAnalyzer(BaseSentimentAnalyzer):
where [assessments] is a list of the assessed tokens and their
polarity and subjectivity scores
"""

kind = CONTINUOUS
# This is only here for backwards-compatibility.
# The return type is actually determined upon calling analyze()
RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])

def analyze(self, text, keep_assessments=False):
"""Return the sentiment as a named tuple of the form:
``Sentiment(polarity, subjectivity, [assessments])``.
"""
#: Return type declaration
if keep_assessments:
RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity', 'assessments'])
Sentiment = namedtuple('Sentiment', ['polarity', 'subjectivity', 'assessments'])
assessments = pattern_sentiment(text).assessments
polarity,subjectivity = pattern_sentiment(text)
return RETURN_TYPE( polarity,subjectivity,assessments )
polarity, subjectivity = pattern_sentiment(text)
return Sentiment(polarity, subjectivity, assessments)

else:
RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
return RETURN_TYPE(*pattern_sentiment(text))
Sentiment = namedtuple('Sentiment', ['polarity', 'subjectivity'])
return Sentiment(*pattern_sentiment(text))


def _default_feature_extractor(words):
Expand Down
2 changes: 2 additions & 0 deletions textblob/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ class TextBlobError(Exception):
"""A TextBlob-related error."""
pass


TextBlobException = TextBlobError # Backwards compat

class MissingCorpusError(TextBlobError):
Expand All @@ -25,6 +26,7 @@ class MissingCorpusError(TextBlobError):
def __init__(self, message=MISSING_CORPUS_MESSAGE, *args, **kwargs):
super(MissingCorpusError, self).__init__(message, *args, **kwargs)


MissingCorpusException = MissingCorpusError # Backwards compat

class DeprecationError(TextBlobError):
Expand Down
1 change: 1 addition & 0 deletions textblob/formats.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ def detect(cls, stream):
except ValueError:
return False


_registry = OrderedDict([
('csv', CSV),
('json', JSON),
Expand Down
1 change: 1 addition & 0 deletions textblob/tokenizers.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ def tokenize(self, text):
'''Return a list of sentences.'''
return nltk.tokenize.sent_tokenize(text)


#: Convenience function for tokenizing sentences
sent_tokenize = SentenceTokenizer().itokenize

Expand Down
3 changes: 1 addition & 2 deletions textblob/translate.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,7 @@ def _unescape(text):
"""Unescape unicode character codes within a string.
"""
pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
decode = lambda x: codecs.getdecoder('unicode_escape')(x.group())[0]
return re.sub(pattern, decode, text)
return re.sub(pattern, lambda x: codecs.getdecoder('unicode_escape')(x.group())[0], text)


def _calculate_tk(source):
Expand Down

0 comments on commit 1113403

Please sign in to comment.