Skip to content

Commit

Permalink
Merge pull request #467 from tensorlayer/release-1.8.4rc0
Browse files Browse the repository at this point in the history
[release] 1.8.4rc1
  • Loading branch information
wagamamaz authored Apr 5, 2018
2 parents 60cfcb1 + cd5e1e9 commit a86008e
Show file tree
Hide file tree
Showing 11 changed files with 38 additions and 30 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ TensorLayer is a deep learning and reinforcement learning library on top of [Ten
- Useful links: [Documentation](http://tensorlayer.readthedocs.io), [Examples](http://tensorlayer.readthedocs.io/en/latest/user/example.html), [中文文档](https://tensorlayercn.readthedocs.io), [中文书](http://www.broadview.com.cn/book/5059)

# News
* [05 Apr] Release [models APIs](http://tensorlayer.readthedocs.io/en/latest/modules/models.html#) for well-known pretrained networks.
* [18 Mar] Release experimental APIs for binary networks.
* [18 Jan] [《深度学习:一起玩转TensorLayer》](http://www.broadview.com.cn/book/5059) (Deep Learning using TensorLayer)
* [17 Dec] Release experimental APIs for distributed training (by [TensorPort](https://tensorport.com)). See [tiny example](https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_mnist_distributed.py).
Expand Down
6 changes: 3 additions & 3 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,9 @@
# built documents.
#
# The short X.Y version.
version = '1.8.4rc0'
version = '1.8.4rc1'
# The full version, including alpha/beta/rc tags.
release = '1.8.4rc0'
release = '1.8.4rc1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
Expand Down Expand Up @@ -143,7 +143,7 @@
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'TensorLayer v1.8.4rc0'
# html_title = 'TensorLayer v1.8.4rc1'

# A shorter title for the navigation bar. Default is the same as html_title.
#
Expand Down
36 changes: 18 additions & 18 deletions example/tutorial_mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,13 +119,13 @@ def main_test_layers(model='relu'):
n_batch += 1
print(" val loss: %f" % (val_loss / n_batch))
print(" val acc: %f" % (val_acc / n_batch))
try:
# You can visualize the weight of 1st hidden layer as follow.
tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
# You can also save the weight of 1st hidden layer to .npz file.
# tl.files.save_npz([network.all_params[0]] , name='w1'+str(epoch+1)+'.npz')
except: # pylint: disable=bare-except
print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
# try:
# # You can visualize the weight of 1st hidden layer as follow.
# tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
# # You can also save the weight of 1st hidden layer to .npz file.
# # tl.files.save_npz([network.all_params[0]] , name='w1'+str(epoch+1)+'.npz')
# except: # pylint: disable=bare-except
# print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")

print('Evaluation')
test_loss, test_acc, n_batch = 0, 0, 0
Expand Down Expand Up @@ -306,11 +306,11 @@ def main_test_stacked_denoise_AE(model='relu'):
n_batch += 1
print(" val loss: %f" % (val_loss / n_batch))
print(" val acc: %f" % (val_acc / n_batch))
try:
# visualize the 1st hidden layer during fine-tune
tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
except: # pylint: disable=bare-except
print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
# try:
# # visualize the 1st hidden layer during fine-tune
# tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
# except: # pylint: disable=bare-except
# print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")

print('Evaluation')
test_loss, test_acc, n_batch = 0, 0, 0
Expand Down Expand Up @@ -451,10 +451,10 @@ def main_test_cnn_layer():
n_batch += 1
print(" val loss: %f" % (val_loss / n_batch))
print(" val acc: %f" % (val_acc / n_batch))
try:
tl.vis.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_' + str(epoch + 1), fig_idx=2012)
except: # pylint: disable=bare-except
print("You should change vis.CNN(), if you want to save the feature images for different dataset")
# try:
# tl.vis.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_' + str(epoch + 1), fig_idx=2012)
# except: # pylint: disable=bare-except
# print("You should change vis.CNN(), if you want to save the feature images for different dataset")

print('Evaluation')
test_loss, test_acc, n_batch = 0, 0, 0
Expand All @@ -474,7 +474,7 @@ def main_test_cnn_layer():
sess = tf.InteractiveSession()

# Dropout and Dropconnect
main_test_layers(model='relu') # model = relu, dropconnect
# main_test_layers(model='relu') # model = relu, dropconnect

# Single Denoising Autoencoder
# main_test_denoise_AE(model='sigmoid') # model = relu, sigmoid
Expand All @@ -483,4 +483,4 @@ def main_test_cnn_layer():
# main_test_stacked_denoise_AE(model='relu') # model = relu, sigmoid

# CNN
# main_test_cnn_layer()
main_test_cnn_layer()
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

setup(
name="tensorlayer",
version="1.8.4rc0",
version="1.8.4rc1",
include_package_data=True,
author='TensorLayer Contributors',
author_email='hao.dong11@imperial.ac.uk',
Expand Down
2 changes: 1 addition & 1 deletion tensorlayer/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
act = activation
vis = visualize

__version__ = "1.8.4rc0"
__version__ = "1.8.4rc1"

global_flag = {}
global_dict = {}
6 changes: 2 additions & 4 deletions tensorlayer/layers/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1387,8 +1387,6 @@ def __init__(
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
# self.all_drop.update({LayersConfig.set_keep[name]: keep})
# self.all_layers.append(self.outputs)
# self.all_params.extend([W, b])

self.all_drop.update({LayersConfig.set_keep[name]: keep})
self.all_layers.append(self.outputs)
self.all_params.extend([W, b])
2 changes: 1 addition & 1 deletion tensorlayer/models/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""A collections of pre-defined well known models."""
# """A collections of pre-defined well known models."""

from .vgg16 import VGG16
from .squeezenetv1 import SqueezeNetV1
Expand Down
3 changes: 3 additions & 0 deletions tensorlayer/models/mobilenetv1.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class MobileNetV1(Layer):
Examples
---------
Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>`__
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get the whole model
>>> net = tl.models.MobileNetV1(x)
Expand All @@ -42,6 +43,7 @@ class MobileNetV1(Layer):
>>> probs = tf.nn.softmax(net.outputs)
Extract features and Train a classifier with 100 classes
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get model without the last layer
>>> cnn = tl.models.MobileNetV1(x, end_with='reshape')
Expand All @@ -57,6 +59,7 @@ class MobileNetV1(Layer):
>>> train_params = tl.layers.get_variables_with_name('output')
Reuse model
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get VGG without the last layer
Expand Down
3 changes: 3 additions & 0 deletions tensorlayer/models/squeezenetv1.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class SqueezeNetV1(Layer):
Examples
---------
Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>`__
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get the whole model
>>> net = tl.models.SqueezeNetV1(x)
Expand All @@ -42,6 +43,7 @@ class SqueezeNetV1(Layer):
>>> probs = tf.nn.softmax(net.outputs)
Extract features and Train a classifier with 100 classes
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get model without the last layer
>>> cnn = tl.models.SqueezeNetV1(x, end_with='fire9')
Expand All @@ -57,6 +59,7 @@ class SqueezeNetV1(Layer):
>>> train_params = tl.layers.get_variables_with_name('output')
Reuse model
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get VGG without the last layer
Expand Down
3 changes: 3 additions & 0 deletions tensorlayer/models/vgg16.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,7 @@ class VGG16(VGG16Base):
Examples
---------
Classify ImageNet classes with VGG16, see `tutorial_models_vgg16.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_vgg16.py>`__
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get the whole model
>>> vgg = tl.models.VGG16(x)
Expand All @@ -255,6 +256,7 @@ class VGG16(VGG16Base):
>>> probs = tf.nn.softmax(vgg.outputs)
Extract features with VGG16 and Train a classifier with 100 classes
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get VGG without the last layer
>>> vgg = tl.models.VGG16(x, end_with='fc2_relu')
Expand All @@ -269,6 +271,7 @@ class VGG16(VGG16Base):
>>> train_params = tl.layers.get_variables_with_name('out')
Reuse model
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get VGG without the last layer
Expand Down
4 changes: 2 additions & 2 deletions tests/test_layers_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,8 @@
if len(net.all_layers) != 2:
raise Exception("layers dont match")

if len(net.all_params) != 2:
if len(net.all_params) != 4:
raise Exception("params dont match")

if net.count_params() != 78500:
if net.count_params() != 88600:
raise Exception("params dont match")

0 comments on commit a86008e

Please sign in to comment.