import unittest
import main as m
import json
import pathlib
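# The checks below rely on the interface this file expects main.py to expose:
# load_sentiment_analysis() returns the trained classifier as its first element,
# getFinalOutput(clf, text) produces the bot's reply for an input string, and
# NEGATIVE_RESPONSES / DEFAULT_RESPONSES hold the canned reply pools.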
# Canned inputs used by the tests below
NEGATIVE_TESTS = [
    "this is a bad terrible awful low quality product",
    "you are doing a terrible job",
    "you are bad at this",
    "horrible assistant ",
    "you are not useful at all",
    "you are terrible at your job ",
]
DEFAULT_TESTS = [
    "what the",
    "tell me more",
    "what is meaning",
    "did the chicken come before the egg",
    "how much does a burger cost",
]
class TestBotMethods(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        # Forward methodName so unittest's own machinery stays initialised
        super().__init__(methodName)
        self.loaded_clf = m.load_sentiment_analysis()[0]
        # Load the intents file that sits next to this script
        # (pathlib's / operator keeps the path cross-platform)
        with open(pathlib.Path(__file__).parent / 'intents.json') as jsonFile:
            self.data = json.load(jsonFile)
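    # NOTE: the tests assume intents.json follows the shape consumed by
    # main.py; the tag and values here are illustrative, not from the repo:
    # {"intents": [{"tag": "greeting",
    #               "patterns": ["hello", "hi there"],
    #               "responses": ["Hello!", "Hi, how can I help?"]}]}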
    # Check that strongly negative inputs map to a negative-sentiment response
    def testIntent(self):
        print("Negative intent testing:")
        totalSuccess = 0
        totalFail = 0
        for test in NEGATIVE_TESTS:
            modelResponse = m.getFinalOutput(self.loaded_clf, test)
            try:
                self.assertIn(modelResponse, m.NEGATIVE_RESPONSES)
                totalSuccess += 1
            except AssertionError:
                print(f'AI response failed test for input: "{test}"')
                totalFail += 1
        if totalFail == 0:
            print("----No tests failed----")
        print(f'Passed {totalSuccess} tests, failed {totalFail} tests.\n')
    # Check that every training pattern maps to one of its tag's responses
    def testResponses(self):
        print("Accurate Response testing:")
        totalSuccess = 0
        totalFail = 0
        for intent in self.data["intents"]:
            # All acceptable outputs for this tag
            possibleOutputs = list(intent["responses"])
            # Each pattern is one of the input sentences for this tag
            for sampleInput in intent["patterns"]:
                modelResponse = m.getFinalOutput(self.loaded_clf, sampleInput)
                try:
                    self.assertIn(modelResponse, possibleOutputs)
                    totalSuccess += 1
                except AssertionError:
                    print(f'AI response failed test for input: "{sampleInput}"')
                    totalFail += 1
        if totalFail == 0:
            print("----No tests failed----")
        print(f'Passed {totalSuccess} tests, failed {totalFail} tests.\n')
    # Check that off-topic inputs fall back to a default response
    def testDefault(self):
        print("Default Response testing:")
        totalSuccess = 0
        totalFail = 0
        for test in DEFAULT_TESTS:
            modelResponse = m.getFinalOutput(self.loaded_clf, test)
            try:
                self.assertIn(modelResponse, m.DEFAULT_RESPONSES)
                totalSuccess += 1
            except AssertionError:
                print(f'AI response failed test for input: "{test}"')
                totalFail += 1
        if totalFail == 0:
            print("----No tests failed----")
        print(f'Passed {totalSuccess} tests, failed {totalFail} tests.\n')
# Run the tests manually when this file is executed directly
if __name__ == '__main__':
    sampleTest = TestBotMethods()
    print("\n----Test Results----")
    sampleTest.testDefault()
    sampleTest.testIntent()
    sampleTest.testResponses()
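    # Alternatively, unittest.main() would discover and run the test*
    # methods with the standard runner, at the cost of the custom
    # pass/fail summaries printed above.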