
better tests
ceccocats committed Jul 26, 2017
1 parent e7a6f1f commit 483ffef
Showing 9 changed files with 170 additions and 75 deletions.
8 changes: 4 additions & 4 deletions CMakeLists.txt
@@ -13,8 +13,8 @@ add_library(tkDNN SHARED src/Layer.cpp src/LayerWgs.cpp
src/Network.cpp src/utils.cpp)
target_link_libraries(tkDNN kernels ${CUDA_LIBRARIES} ${CUDA_CUBLAS_LIBRARIES} -lcudnn)

-add_executable(tkDNNtest tests/test.cpp)
-target_link_libraries(tkDNNtest tkDNN)
+add_executable(test_simple tests/test/test.cpp)
+target_link_libraries(test_simple tkDNN)

-add_executable(mnist tests/mnist/test.cpp)
-target_link_libraries(mnist tkDNN)
+add_executable(test_mnist tests/mnist/test.cpp)
+target_link_libraries(test_mnist tkDNN)
11 changes: 11 additions & 0 deletions tests/build_models.sh
@@ -0,0 +1,11 @@
#!/bin/bash
echo "build test Model"
cd test
python test_model.py
cd ..
cd mnist
python mnist_model.py
cd ..
echo "export weights"
python weights_exporter.py test/net.h5 --output test/layers
python caffe_weights_exporter.py mnist/lenet.prototxt mnist/lenet.caffemodel --output mnist/layers
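
The script only generates and exports; nothing in the commit verifies the dumps. A minimal sanity check (editor's sketch, not part of the commit; run it from the tests/ directory) that the files the two C++ tests later load were actually produced:

#!/usr/bin/env python
# check_exports.py -- editor's sketch, not part of this commit:
# confirm build_models.sh produced every file the C++ tests load.
import os

expected = [
    "test/input.bin", "test/output.bin",
    "test/layers/conv0.bin", "test/layers/conv0.bias.bin",
    "test/layers/conv1.bin", "test/layers/conv1.bias.bin",
    "test/layers/dense2.bin", "test/layers/dense2.bias.bin",
    "mnist/input.bin", "mnist/output.bin",
    "mnist/layers/Convolution0.bin", "mnist/layers/Convolution0.bias.bin",
    "mnist/layers/Convolution1.bin", "mnist/layers/Convolution1.bias.bin",
    "mnist/layers/InnerProduct2.bin", "mnist/layers/InnerProduct2.bias.bin",
    "mnist/layers/InnerProduct3.bin", "mnist/layers/InnerProduct3.bias.bin",
]
missing = [f for f in expected if not os.path.isfile(f)]
print "missing:", missing if missing else "none"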
1 change: 0 additions & 1 deletion tests/caffe_weights_exporter.py
@@ -14,7 +14,6 @@
help='Path to caffemodel file')

parser.add_argument('--output', type=str, help="output directory", default="layers")
-parser.add_argument('--test_db', type=str, help="input db to test", default=None)

args = parser.parse_args()

32 changes: 32 additions & 0 deletions tests/mnist/mnist_model.py
@@ -0,0 +1,32 @@
#!/usr/bin/env python
# mail: admin@9crk.com
# author: 9crk.from China.ShenZhen
# time: 2017-03-22

import caffe
import numpy as np
import cv2
import sys
import Image
import matplotlib.pyplot as plt

model = 'lenet.prototxt'
weights = 'lenet.caffemodel'
net = caffe.Net(model, weights, caffe.TEST)
caffe.set_mode_gpu()
# random 28x28 input, already in the 0-1 range the network expects
img = np.array(np.random.rand(28,28), dtype=np.float32)

print "INPUT: ", img
img.tofile("input.bin", format="f")
print "SHAPE: ", np.shape(img)
out = net.forward_all(data=np.asarray([img]))

out = out[out.keys()[0]]
print out
print np.shape(out)
out.tofile("output.bin", format="f")
#print out['prob'][0]
#print out['prob'][0].argmax()
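
A quick way to confirm these dumps match what tests/mnist/test.cpp will read -- 28*28 input values and, for the standard 10-class LeNet, 10 output values (editor's sketch, not part of the commit):

# verify_mnist_bins.py -- editor's sketch, not part of this commit.
import numpy as np

inp = np.fromfile("input.bin", dtype=np.float32)
out = np.fromfile("output.bin", dtype=np.float32)
print "input.bin :", inp.size, "floats (expected 28*28 =", 28*28, ")"
print "output.bin:", out.size, "floats (expected 10 for LeNet)"
print "output sum:", out.sum()   # ~1.0 if the prototxt ends in a Softmax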


58 changes: 58 additions & 0 deletions tests/mnist/test.cpp
@@ -0,0 +1,58 @@
#include <iostream>
#include "tkdnn.h"

const char *input_bin = "../tests/mnist/input.bin";
const char *c0_bin = "../tests/mnist/layers/Convolution0.bin";
const char *c0_bias_bin = "../tests/mnist/layers/Convolution0.bias.bin";
const char *c1_bin = "../tests/mnist/layers/Convolution1.bin";
const char *c1_bias_bin = "../tests/mnist/layers/Convolution1.bias.bin";
const char *d2_bin = "../tests/mnist/layers/InnerProduct2.bin";
const char *d2_bias_bin = "../tests/mnist/layers/InnerProduct2.bias.bin";
const char *d3_bin = "../tests/mnist/layers/InnerProduct3.bin";
const char *d3_bias_bin = "../tests/mnist/layers/InnerProduct3.bias.bin";
const char *output_bin = "../tests/mnist/output.bin";

int main() {

// Network layout
tkDNN::Network net;
tkDNN::dataDim_t dim(1, 1, 28, 28, 1);
tkDNN::Layer *l;
l = new tkDNN::Conv2d (&net, dim, 20, 5, 5, 1, 1, c0_bin, c0_bias_bin);
l = new tkDNN::Pooling (&net, l->output_dim, 2, 2, 2, 2, tkDNN::POOLING_MAX);
l = new tkDNN::Conv2d (&net, l->output_dim, 50, 5, 5, 1, 1, c1_bin, c1_bias_bin);
l = new tkDNN::Pooling (&net, l->output_dim, 2, 2, 2, 2, tkDNN::POOLING_MAX);
l = new tkDNN::Dense (&net, l->output_dim, 500, d2_bin, d2_bias_bin);
l = new tkDNN::Activation (&net, l->output_dim, CUDNN_ACTIVATION_RELU);
l = new tkDNN::Dense (&net, l->output_dim, 10, d3_bin, d3_bias_bin);
l = new tkDNN::Softmax (&net, l->output_dim);

// Load input
value_type *data;
value_type *input_h;
readBinaryFile(input_bin, dim.tot(), &input_h, &data);

printDeviceVector(dim.tot(), data);
dim.print(); //print initial dimension

TIMER_START

// Inference
data = net.infer(dim, data);

TIMER_STOP
dim.print();

// Print result
std::cout<<"\n======= RESULT =======\n";
printDeviceVector(dim.tot(), data);

// Print the reference output (output.bin from mnist_model.py)
std::cout<<"\n==== CHECK RESULT ====\n";
value_type *out;
value_type *out_h;
readBinaryFile(output_bin, dim.tot(), &out_h, &out);
printDeviceVector(dim.tot(), out);

return 0;
}
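
For reference, the spatial sizes this network should report via dim.print() follow from the constructor arguments and the usual no-padding formula out = (in - kernel) / stride + 1 (editor's sketch, not part of the commit; assumes tkDNN applies no padding here, since none is passed):

# lenet_dims.py -- editor's sketch, not part of this commit.
def out_size(size, kernel, stride):
    return (size - kernel) / stride + 1

s = 28                    # dataDim_t(1, 1, 28, 28, 1)
s = out_size(s, 5, 1)     # Conv2d  20 filters, 5x5, stride 1 -> 24
s = out_size(s, 2, 2)     # Pooling 2x2, stride 2             -> 12
s = out_size(s, 5, 1)     # Conv2d  50 filters, 5x5, stride 1 -> 8
s = out_size(s, 2, 2)     # Pooling 2x2, stride 2             -> 4
print "conv stack output:", 50, "x", s, "x", s        # 50*4*4 = 800 values
print "dense chain      :", 50 * s * s, "-> 500 -> 10"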
40 changes: 0 additions & 40 deletions tests/test.cpp

This file was deleted.

54 changes: 54 additions & 0 deletions tests/test/test.cpp
@@ -0,0 +1,54 @@
#include <iostream>
#include "tkdnn.h"

const char *input_bin = "../tests/test/input.bin";
const char *c0_bin = "../tests/test/layers/conv0.bin";
const char *c0_bias_bin = "../tests/test/layers/conv0.bias.bin";
const char *c1_bin = "../tests/test/layers/conv1.bin";
const char *c1_bias_bin = "../tests/test/layers/conv1.bias.bin";
const char *d2_bin = "../tests/test/layers/dense2.bin";
const char *d2_bias_bin = "../tests/test/layers/dense2.bias.bin";
const char *output_bin = "../tests/test/output.bin";

int main() {

// Network layout
tkDNN::Network net;
tkDNN::dataDim_t dim(1, 1, 10, 10, 1);
tkDNN::Layer *l;
l = new tkDNN::Conv2d (&net, dim, 2, 4, 4, 2, 2, c0_bin, c0_bias_bin);
l = new tkDNN::Activation (&net, l->output_dim, CUDNN_ACTIVATION_RELU);
l = new tkDNN::Conv2d (&net, l->output_dim, 4, 2, 2, 1, 1, c1_bin, c1_bias_bin);
l = new tkDNN::Activation (&net, l->output_dim, CUDNN_ACTIVATION_RELU);
l = new tkDNN::Flatten (&net, l->output_dim);
l = new tkDNN::Dense (&net, l->output_dim, 4, d2_bin, d2_bias_bin);
l = new tkDNN::Activation (&net, l->output_dim, CUDNN_ACTIVATION_RELU);

// Load input
value_type *data;
value_type *input_h;
readBinaryFile(input_bin, dim.tot(), &input_h, &data);

printDeviceVector(dim.tot(), data);
dim.print(); //print initial dimension

TIMER_START

// Inference
data = net.infer(dim, data);

TIMER_STOP
dim.print();

// Print result
std::cout<<"\n======= RESULT =======\n";
printDeviceVector(dim.tot(), data);

// Print the reference output (output.bin from test_model.py)
std::cout<<"\n==== CHECK RESULT ====\n";
value_type *out;
value_type *out_h;
readBinaryFile(output_bin, dim.tot(), &out_h, &out);
printDeviceVector(dim.tot(), out);
return 0;
}
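
The layer constructors above also pin down how large each exported .bin file should be. A small check (editor's sketch, not part of the commit; assumes the exporter writes raw float32 with no header and that the convolutions use no padding; run from tests/test/):

# check_test_layers.py -- editor's sketch, not part of this commit.
import numpy as np

def floats_in(path):
    return np.fromfile(path, dtype=np.float32).size

# conv0: 2 filters, 1 input channel, 4x4 kernel; conv1: 4 filters, 2 channels, 2x2;
# dense2: flatten of 4x3x3 = 36 inputs -> 4 outputs.
expected = [
    ("layers/conv0.bin",      2 * 1 * 4 * 4), ("layers/conv0.bias.bin", 2),
    ("layers/conv1.bin",      4 * 2 * 2 * 2), ("layers/conv1.bias.bin", 4),
    ("layers/dense2.bin",     36 * 4),        ("layers/dense2.bias.bin", 4),
]
for path, n in expected:
    print path, floats_in(path), "(expected %d)" % n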
8 changes: 4 additions & 4 deletions tests/test_model.py → tests/test/test_model.py
@@ -7,7 +7,6 @@
from keras.models import Sequential, Model
from keras.layers import Cropping2D
import keras.backend.tensorflow_backend as KTF
-from weights_exporter import *

def dense_model():
model = Sequential()
@@ -17,6 +16,8 @@ def dense_model():
bias_initializer='random_uniform', activation="relu"))
model.add(Convolution2D(4, (2, 2), subsample=(1, 1),
bias_initializer='random_uniform', activation="relu"))
+model.add(Flatten())
+model.add(Dense(4, bias_initializer='random_uniform', activation="relu"))
sgd = keras.optimizers.Adam(lr=1e-4, decay=1e-8)
model.compile(optimizer=sgd, loss="mse")
return model
@@ -26,9 +27,7 @@ def dense_model():
print "DATA FORMAT: ", keras.backend.image_data_format()

model = dense_model()
-wg = model.get_weights()
-export_conv2d("conv0", wg[0], wg[1])
-export_conv2d("conv1", wg[2], wg[3])
+model.save("net.h5")

grid = np.random.rand(10,10)
X = grid[None,:,:]
@@ -41,3 +40,4 @@ def dense_model():
print np.shape(r)
print "Result: ", r
print "Result shape: ", np.shape(r)
+r.tofile("output.bin", format="f")
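
Since the weights now travel through net.h5 instead of being exported inline, it can help to see what the rewritten weights_exporter.py will find when it walks the saved model (editor's sketch, not part of the commit; assumes default Keras layer names such as conv2d_1 and dense_1):

# inspect_net.py -- editor's sketch, not part of this commit.
from keras.models import load_model

model = load_model("net.h5")
for l in model.layers:
    shapes = [w.shape for w in l.get_weights()]
    print l.name, shapes   # Flatten has no weights -> hits the exporter's "skip" branch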
33 changes: 7 additions & 26 deletions tests/weights_exporter.py
@@ -99,9 +99,7 @@ def get_session(gpu_fraction=0.5):
parser = argparse.ArgumentParser(description='KERAS WEIGHTS EXPORTER TO CUDNN')
parser.add_argument('model',type=str,
help='Path to model h5 file. Model should be on the same path.')
-parser.add_argument('layers', type=str, help="layers list [ dense, conv2d ]", nargs='+')
parser.add_argument('--output', type=str, help="output directory", default="layers")
-parser.add_argument('--test_db', type=str, help="input db to test", default=None)

args = parser.parse_args()

@@ -119,34 +117,17 @@ def get_session(gpu_fraction=0.5):

num = 0
name_num = 0
-for i in args.layers:
-    if i == "conv3d":
+for l in model.layers:
+    name = l.name
+    if name.startswith("conv3d"):
        export_conv3d(args.output + "/conv" + str(name_num), weights[num], weights[num+1])
-    elif i == "conv2d":
+    elif name.startswith("conv2d"):
        export_conv2d(args.output + "/conv" + str(name_num), weights[num], weights[num+1])
-    elif i == "dense":
+    elif name.startswith("dense"):
        export_dense(args.output + "/dense" + str(name_num), weights[num], weights[num+1])
    else:
-        print "error: ", i, "is not a layer type"
-        break
+        print "skip:", name, "has no weights"
+        continue
    name_num += 1
    num += 2

-if args.test_db != None:
-    print "Test on db: ", args.test_db
-    db = lmdb.open(args.test_db, subdir=False, readonly=True, lock=False)
-    txn = db.begin()
-
-    s = random.randint(0, txn.stat()["entries"]-1)
-    print "camp number: ", s
-    s = txn.get(str(s))
-    c = msgpack.unpackb(s)
-
-    print "Steer, throttle: ", c["actuators"]
-    print "Speed (m/s): ", c["speed"]
-    grid = np.asarray(c["bitmap"], np.float32)
-    i = np.array(grid.flatten(), dtype=np.float32)
-    i.tofile(args.output + "input.bin", format="f")
-    X = grid[None, :, :]
-
-    print "Prediction: ", model.predict(X)

0 comments on commit 483ffef
