Commit d18c047

Merge branch 'master' of github.com:nardeas/glados

Teemu Taskula committed Nov 26, 2017
2 parents 146ffb3 + 479bc9f
Showing 18 changed files with 192 additions and 53 deletions.
33 changes: 33 additions & 0 deletions cnn_model/app.py
@@ -0,0 +1,33 @@
from flask import Flask, request, jsonify
from utils import crop_frames, get_crop_sign, store_img, process_image
from cnn_model import predict
import numpy as np
import time
app = Flask(__name__)

@app.route('/predict/<id>', methods=['GET'])
def predict_sign(id):
full_path = "/tmp/{0}-full.jpg".format(id)
crop_path = "/tmp/{0}".format(id)
cropped_img = get_crop_sign(full_path)

if not cropped_img:
return jsonify({
'image': None,
'valid': False
})

store_img(path=crop_path, image=cropped_img)
label = predict(np.array(process_image(cropped_img)))

data = {'id': id,
'timestamp': int(time.time()),
'image': crop_path,
'image_full': full_path,
'probability': 0.0,
'valid': label[0]}

return jsonify(data)

if __name__ == '__main__':
app.run(port=8080)
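
For context, a minimal sketch of how this new endpoint could be exercised once the service is up; the port matches the `app.run(port=8080)` call above, while the example id and the assumption that a full frame already exists at `/tmp/<id>-full.jpg` are illustrative.

```python
# Hypothetical client call against the new /predict/<id> route. Assumes the
# Flask app above is running on localhost:8080 and that the WebSocket server
# has already written the full frame to /tmp/<id>-full.jpg.
import requests

frame_id = "4575917962"  # illustrative id
resp = requests.get("http://localhost:8080/predict/{0}".format(frame_id))
result = resp.json()

if result.get("valid"):
    print("Stop sign detected; crop stored at", result["image"])
else:
    print("No sign detected for frame", frame_id)
```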
55 changes: 14 additions & 41 deletions cnn_model/cnn_model.py
@@ -4,6 +4,7 @@

from utils import read_data
from sklearn.model_selection import train_test_split
import numpy as np

data = read_data()
X_train = data["x"]
@@ -22,7 +23,7 @@
def conv_net(features, reuse, n_classes, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name="feature_input")
x = features['images']

# Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x32.
@@ -35,15 +36,11 @@ def conv_net(features, reuse, n_classes, is_training):

# Fully connected layer (in tf contrib folder for now)
out = tf.layers.dense(fc1, units=n_classes, name='out')
return out, x
return out

logits_train_x = None
logits_test_x = None
def model_fn(features, labels, mode):
global logits_train_x
global logits_test_x
logits_train, logits_train_x = conv_net(features, False, n_classes=num_classes, is_training=True)
logits_test, logits_test_x = conv_net(features, True, n_classes=num_classes, is_training=False)
logits_train = conv_net(features, False, n_classes=num_classes, is_training=True)
logits_test = conv_net(features, True, n_classes=num_classes, is_training=False)

# Predictions
pred_classes = tf.argmax(logits_test, axis=1)
@@ -63,6 +60,7 @@ def model_fn(features, labels, mode):
# Evaluate the accuracy of the model
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)


# The TF Estimator API requires returning an EstimatorSpec that specifies
# the different ops for training, evaluating, ...
estim_specs = tf.estimator.EstimatorSpec(
@@ -81,43 +79,18 @@ def model_fn(features, labels, mode):
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': X_train}, y=y_train,
batch_size=batch_size, num_epochs=None, shuffle=True)

# Train the Model
model.train(input_fn, steps=num_steps)
#model.train(input_fn, steps=num_steps)

# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': X_train}, y=y_train,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
def predict(image_array):
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images': np.array([image_array], dtype=np.float32)}, num_epochs=1, shuffle=False)
return list(model.predict(input_fn))

validation_data = read_data(path='validation/', true_label=None)
X_val = validation_data['x']
y_val = validation_data['y']

# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': X_val}, y=y_val,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Stop signs", e)
# Validate model
# input_fn = tf.estimator.inputs.numpy_input_fn(x={'images': X_val}, num_epochs=1, shuffle=False)
# v = list(model.predict(input_fn))
# print("Predicted stop signs", v)
# print("Testing Accuracy:", e['accuracy'])

validation_data = read_data(path='validation_false/', true_label="asd")
X_val = validation_data['x']
y_val = validation_data['y']

input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': X_val}, y=y_val,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("No stop signs", e)

validation_data_false = read_data(path='validation_false/', true_label="asd")
X_val_false = validation_data_false['x']
y_val_false = validation_data_false['y']
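
To show how the new `predict` helper is meant to be consumed (mirroring what `app.py` does), here is a minimal sketch; it assumes the crop has already been resized to 32x32 RGB by `utils.process_image`, and the file path is illustrative.

```python
# Sketch of feeding a single cropped frame to cnn_model.predict, mirroring
# app.py. The crop path is illustrative; process_image resizes to 32x32 RGB.
import numpy as np
from PIL import Image

from utils import process_image
from cnn_model import predict

crop = Image.open("/tmp/example-crop.jpg")   # hypothetical cropped sign
arr = np.array(process_image(crop))          # (32, 32, 3) image array
labels = predict(arr)                        # wraps Estimator.predict over a single image
print("Predicted class:", labels[0])         # app.py uses this value as the 'valid' flag
```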
6 changes: 5 additions & 1 deletion cnn_model/requirements.txt
@@ -1,18 +1,22 @@
appnope==0.1.0
backports.functools-lru-cache==1.4
backports.shutil-get-terminal-size==1.0.0
backports.weakref==1.0.post1
bleach==1.5.0
click==6.7
cycler==0.10.0
decorator==4.1.2
enum34==1.1.6
Flask==0.12.2
funcsigs==1.0.2
futures==3.1.1
html5lib==0.9999999
ipdb==0.10.3
ipython==5.5.0
ipython-genutils==0.2.0
itsdangerous==0.24
Jinja2==2.10
Markdown==2.6.9
MarkupSafe==1.0
matplotlib==2.1.0
mock==2.0.0
mutable==0.2.2
6 changes: 3 additions & 3 deletions cnn_model/trained_model/graph.pbtxt
@@ -982,7 +982,7 @@ node {
}
}
node {
name: "ConvNet/Placeholder"
name: "ConvNet/feature_input"
op: "Placeholder"
input: "^add"
attr {
@@ -3580,7 +3580,7 @@ node {
}
}
node {
name: "ConvNet_1/Placeholder"
name: "ConvNet_1/feature_input"
op: "Placeholder"
input: "^add"
attr {
@@ -14003,7 +14003,7 @@ node {
dtype: DT_STRING
tensor_shape {
}
string_val: "_temp_ab0101c4d73346ed92c4b54680eaf431/part"
string_val: "_temp_0856d67a756543af91b0ed2baec2438f/part"
}
}
}
Binary file modified cnn_model/trained_model/model.ckpt-1.data-00000-of-00001
Binary file modified cnn_model/trained_model/model.ckpt-1.index
Binary file modified cnn_model/trained_model/model.ckpt-1.meta
Binary file modified cnn_model/trained_model/model.ckpt-2000.data-00000-of-00001
Binary file modified cnn_model/trained_model/model.ckpt-2000.index
Binary file modified cnn_model/trained_model/model.ckpt-2000.meta
16 changes: 13 additions & 3 deletions cnn_model/utils.py
@@ -17,7 +17,7 @@ def read_data(path='GTSRB/Final_Training/Images/*/', shape=(32, 32), true_label=
if true_label is None:
print(filename)

if label is not 1 and i > 150:
if label is not 1 and i > 200:
break

im = Image.open(filename)
@@ -31,8 +31,18 @@

return {"x": np.array(X, dtype=np.float32), "y": np.array(Y, dtype=np.float32)}

def process_image(im, shape=(32, 32)):
im = im.convert('RGB')
return im.resize(shape, Image.LANCZOS)

def crop_sign(img_path):
def get_image_array(path):
im = Image.open(path)
return np.array(process_image(im))

def store_img(path, image, format="jpg"):
image.save(path + '.' + format)

def get_crop_sign(img_path):
img = cv2.imread(img_path)
original_img = img.copy()
final_img = Image.open(img_path)
@@ -120,7 +130,7 @@ def crop_sign(img_path):

def crop_frames(filenames):
for i, file in enumerate(filenames):
image = crop_sign(file)
image = get_crop_sign(file)
if image:
image.save("crop" + str(i) + '.bmp')
else:
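
The renamed `get_crop_sign` plus the new `store_img`/`process_image` helpers form the pipeline the Flask endpoint relies on. A short sketch of that flow, with the `/tmp` paths as illustrative assumptions mirroring `app.py`:

```python
# Sketch of the crop -> store -> preprocess flow built from the utils helpers;
# the /tmp paths mirror the conventions in app.py and are illustrative.
import numpy as np
from utils import get_crop_sign, store_img, process_image

frame_id = "4575917962"
cropped = get_crop_sign("/tmp/{0}-full.jpg".format(frame_id))  # falsy if no sign contour is found

if cropped:
    store_img(path="/tmp/{0}".format(frame_id), image=cropped)  # writes /tmp/<id>.jpg
    batch_ready = np.array(process_image(cropped))              # 32x32 RGB array for the CNN
```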
11 changes: 8 additions & 3 deletions docker/server/Dockerfile
@@ -2,7 +2,6 @@ FROM glados:tf

# Setup env
ENV NODE_VERSION=8.6.0 NPM_VERSION=5
ENV RM_DIRS=/usr/include

# Install node & npm
RUN echo "Collecting dependencies..." && \
@@ -49,7 +48,7 @@ RUN echo "Collecting dependencies..." && \
\
echo "Cleaning up..." && \
\
rm -rf ${RM_DIRS} /node-v${NODE_VERSION}* /usr/share/man /tmp/* \
rm -rf /node-v${NODE_VERSION}* /usr/share/man /tmp/* \
/root/.npm /root/.node-gyp /root/.gnupg /usr/lib/node_modules/npm/man \
/usr/lib/node_modules/npm/doc /usr/lib/node_modules/npm/html /usr/lib/node_modules/npm/scripts && \
echo "Version info - node=$(node -v), npm=$(npm -v)"
@@ -61,9 +60,15 @@ RUN pip install opencv-python
WORKDIR /opt
COPY ./entry.sh /opt/entry.sh
COPY ./pkg.tar.gz /opt/pkg.tar.gz
COPY ./cnn.tar.gz /opt/cnn.tar.gz

# Unpack
RUN tar -zxvf /opt/pkg.tar.gz && \
RUN tar -zxvf /opt/cnn.tar.gz && \
tar -zxvf /opt/pkg.tar.gz

RUN cd /opt/cnn_model && \
pip install -r requirements.txt && \
\
cd /opt/server && \
npm i && \
chmod a+x /opt/* && \
30 changes: 30 additions & 0 deletions server/geo.js
@@ -0,0 +1,30 @@
// Returns radians from degrees
const radians = d => (d * Math.PI / 180);

// Returns degrees from radians
const degrees = r => (r * 180 / Math.PI);

// Returns the haversine distance in km between two coords
const distance = (lat1, lng1, lat2, lng2) => {

// Get deltas
const dLat = radians(lat2-lat1);
const dLon = radians(lng2-lng1);

// Convert to radians
lat1 = radians(lat1);
lat2 = radians(lat2);

const a = (Math.sin(dLat/2) * Math.sin(dLat/2) +
(Math.sin(dLon/2) * Math.sin(dLon/2) *
Math.cos(lat1) * Math.cos(lat2)));

return 6371 * 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a));
}

// Export all
module.exports = {
radians,
degrees,
distance,
}
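
For reference, the `distance` helper above is the standard haversine great-circle formula with the Earth radius the code uses:

```latex
a = \sin^2\!\left(\tfrac{\Delta\varphi}{2}\right)
    + \cos\varphi_1 \,\cos\varphi_2 \,\sin^2\!\left(\tfrac{\Delta\lambda}{2}\right),
\qquad
d = 2R \,\operatorname{atan2}\!\left(\sqrt{a},\,\sqrt{1-a}\right),
\qquad R \approx 6371\ \text{km}.
```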
65 changes: 63 additions & 2 deletions server/index.js
@@ -1,10 +1,18 @@
const fs = require('fs');
const axios = require('axios');
const server = require('./server');
const http = require('./http');

// Base path
const { BASE_PATH = 'localhost:8000' } = process.env;

// Collect api and streams
const { sendEvent } = server;
const { connections, messages, errors, options } = server;

// Create history storage
const history = {};

// New connections
connections.subscribe(([ws, chan]) => {
console.log(`* [chan:${chan}] new connection ${ws._socket.remoteAddress}`);
@@ -17,9 +25,62 @@ messages.subscribe(([evt, chan]) => {

// Event: capture image
messages
.filter(([evt, _]) => evt.type === 'MARK_CAPTURE')
.filter(([evt, _]) => evt.type === 'MARK_SEND')
.subscribe(([evt, chan]) => {
console.log(evt.payload);

const { payload } = evt;

// IN:
// {
// id: 4575917962,
// timestamp: 1511649981213,
// lat: 60.299,
// lng: 24.2119,
// data: <base64>
// }

// Let's hack it together
if (payload.data) {

// Create filename
const filename = `/tmp/${payload.id}-full.jpg`;

// Write base64 data to JPG file
fs.writeFile(filename, payload.data, 'base64', err => {
if (err) {
return console.log(err);
}

// Log new image
console.log(`* got new image ${filename}`);

// Try predict
axios
.get(`http://localhost:8080/predict/${payload.id}`)
.then(res => {

console.log(res.data);

if (res.data.valid) {
// Respond
sendEvent(chan, 'MARK_RECEIVE', {
id: payload.id,
timestamp: +new Date(),
lat: payload.lat,
lng: payload.lng,
image: `http://${BASE_PATH}/static/${payload.id}-full.jpg`,
image_full: `http://${BASE_PATH}/static/${payload.id}-full.jpg`,
probability: 0,
valid: true
});
}
})
.catch(err => {
console.log(err);
})
});
}
});

console.log('>> WS server started on port', options.port);
console.log('-- base path:', BASE_PATH)
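
Below is a minimal sketch of a client pushing a `MARK_SEND` event to this server. The JSON event shape `{ type, payload }` is inferred from the `evt.type`/`evt.payload` usage above, and the `ws://localhost:8000` URL is an assumption (the real port comes from `options.port` in `server.js`, which is not part of this diff).

```python
# Hypothetical client for the MARK_SEND flow above. Assumes the WS server
# accepts JSON events shaped like { type, payload } and listens on port 8000;
# both are assumptions, not confirmed by server.js in this diff.
import asyncio
import base64
import json
import time

import websockets

async def send_mark(image_path):
    # Read the frame and base64-encode it, matching the 'data: <base64>' field
    # in the payload comment above.
    with open(image_path, "rb") as f:
        data = base64.b64encode(f.read()).decode("ascii")

    async with websockets.connect("ws://localhost:8000") as ws:
        await ws.send(json.dumps({
            "type": "MARK_SEND",
            "payload": {
                "id": 4575917962,                 # illustrative id
                "timestamp": int(time.time() * 1000),
                "lat": 60.299,
                "lng": 24.2119,
                "data": data,                     # base64-encoded JPEG frame
            },
        }))
        print(await ws.recv())  # expect a MARK_RECEIVE event if a sign was detected

asyncio.run(send_mark("frame.jpg"))
```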