change network config format, and update layer act #980

Merged
merged 27 commits on Jun 2, 2019

Changes from 21 commits
Commits (27)
f51838f
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 12, 2019
341cf1c
(non)trainable weights, layer all_layers
warshallrho May 12, 2019
58edaed
weights -> all_weights
warshallrho May 12, 2019
647d953
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 12, 2019
ad973fd
weights -> all_weights, trainable weights, nontrainable_weights
warshallrho May 12, 2019
0af6056
fix bugs, yapf
warshallrho May 13, 2019
37bb705
fix bugs
warshallrho May 13, 2019
be27eb2
fix bugs
warshallrho May 13, 2019
760b219
fix bugs
warshallrho May 13, 2019
4024ace
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 20, 2019
cc1feb7
alpha version, update network config
warshallrho May 25, 2019
372aa6b
fix bug
warshallrho May 25, 2019
1ad5b82
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 25, 2019
e79b8d6
add files
warshallrho May 25, 2019
74c6ada
Update CHANGELOG.md
warshallrho May 25, 2019
0866b56
fix bugs
warshallrho May 25, 2019
a795f72
yapf
warshallrho May 25, 2019
03e990c
update act in base layer and related layers
warshallrho May 25, 2019
0261521
fix bugs
warshallrho May 25, 2019
5549234
fix bug
warshallrho May 25, 2019
0f1ae82
fix bugs
warshallrho May 25, 2019
c415b79
Merge branch 'master' into master
zsdonghao May 29, 2019
daef3c3
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 29, 2019
65ee5a6
parse float in lrelu
warshallrho May 29, 2019
39ba8b7
Merge branch 'master' of https://github.com/warshallrho/tensorlayer2
warshallrho May 29, 2019
d7b8e7d
yapf
warshallrho May 29, 2019
d9c3b53
Merge branch 'master' into master
zsdonghao Jun 2, 2019
17 changes: 16 additions & 1 deletion CHANGELOG.md
@@ -67,6 +67,21 @@ To release a new version, please update the changelog as followed:

<!-- YOU CAN EDIT FROM HERE -->

## [2.1.0] - 2019-5-25

### Changed
- change the format of network config, change related code and files; change layer act (PR #980)

### Added

### Dependencies Update

### Fixed

### Contributors
- @warshallrho: #PR980


## [2.0.1] - 2019-5-17


@@ -460,4 +475,4 @@ To many PR for this update, please check [here](https://github.com/tensorlayer/t
[1.10.0]: https://github.com/tensorlayer/tensorlayer/compare/1.9.1...1.10.0
[1.9.1]: https://github.com/tensorlayer/tensorlayer/compare/1.9.0...1.9.1
[1.9.0]: https://github.com/tensorlayer/tensorlayer/compare/1.8.5...1.9.0
[1.8.5]: https://github.com/tensorlayer/tensorlayer/compare/1.8.4...1.8.5
[1.8.5]: https://github.com/tensorlayer/tensorlayer/compare/1.8.4...1.8.5
3 changes: 1 addition & 2 deletions docs/user/get_start_model.rst
@@ -208,10 +208,9 @@ z = f(x*W+b)

class Dense(Layer):
def __init__(self, n_units, act=None, in_channels=None, name=None):
super(Dense, self).__init__(name)
super(Dense, self).__init__(name, act=act)

self.n_units = n_units
self.act = act
self.in_channels = in_channels

# for dynamic model, it needs the input shape to get the shape of W
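
For orientation, a sketch of the convention this documentation change describes: a user-defined layer now hands `act` to the base `Layer` constructor instead of keeping its own `self.act` assignment. The class name and the `build`/`forward` bodies below are illustrative, modelled on the guide's surrounding Dense example, and they assume the TL 2.x helpers (`self._get_weights`, `tl.initializers.truncated_normal`) behave as documented there.

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Layer


class CustomDense(Layer):  # illustrative name, not part of the PR
    def __init__(self, n_units, act=None, in_channels=None, name=None):
        # act is now stored and handled by the base Layer, not by the subclass
        super(CustomDense, self).__init__(name, act=act)
        self.n_units = n_units
        self.in_channels = in_channels
        if self.in_channels is not None:
            self.build(None)
            self._built = True

    def build(self, inputs_shape):
        if self.in_channels is None:
            self.in_channels = inputs_shape[-1]
        self.W = self._get_weights(
            "weights", shape=(self.in_channels, self.n_units),
            init=tl.initializers.truncated_normal(stddev=0.05)
        )

    def forward(self, inputs):
        z = tf.matmul(inputs, self.W)
        if self.act:  # attribute set by the base Layer from the act argument
            z = self.act(z)
        return z
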
2 changes: 1 addition & 1 deletion examples/text_generation/tutorial_generate_text.py
@@ -329,4 +329,4 @@ def main_lstm_generate_text():
# main_restore_embedding_layer()

# How to generate text from a given context
main_lstm_generate_text()
main_lstm_generate_text()
4 changes: 2 additions & 2 deletions tensorlayer/db.py
@@ -13,7 +13,7 @@
import tensorflow as tf

from tensorlayer import logging
from tensorlayer.files import net2static_graph, static_graph2net, assign_weights
from tensorlayer.files import static_graph2net, assign_weights
from tensorlayer.files import save_weights_to_hdf5, load_hdf5_to_weights
from tensorlayer.files import del_folder, exists_or_mkdir

@@ -153,7 +153,7 @@ def save_model(self, network=None, model_name='model', **kwargs):
s = time.time()

# kwargs.update({'architecture': network.all_graphs, 'time': datetime.utcnow()})
kwargs.update({'architecture': net2static_graph(network), 'time': datetime.utcnow()})
kwargs.update({'architecture': network.config, 'time': datetime.utcnow()})

try:
params_id = self.model_fs.put(self._serialization(params))
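
For context, a hedged sketch of what the replacement above amounts to on the caller side: the architecture document stored by `save_model` is now the `network.config` dict introduced by this PR, rather than the output of the removed `net2static_graph` helper. The `net` model and the `accuracy` field are illustrative.

>>> from datetime import datetime
>>> # `net` is assumed to be a static TensorLayer Model built elsewhere
>>> doc = {'accuracy': 0.98}  # user kwargs forwarded by save_model
>>> doc.update({'architecture': net.config, 'time': datetime.utcnow()})  # new config-based format
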
221 changes: 120 additions & 101 deletions tensorlayer/files/utils.py
@@ -31,6 +31,7 @@
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util import serialization
import json
import datetime

# from six.moves import zip

@@ -73,7 +74,7 @@
'load_hdf5_to_weights',
'save_hdf5_graph',
'load_hdf5_graph',
'net2static_graph',
# 'net2static_graph',
'static_graph2net',
# 'save_pkl_graph',
# 'load_pkl_graph',
@@ -92,29 +93,29 @@ def str2func(s):
return expr


def net2static_graph(network):
saved_file = dict()
if network._NameNone is True:
saved_file.update({"name": None})
else:
saved_file.update({"name": network.name})
if not isinstance(network.inputs, list):
saved_file.update({"inputs": network.inputs._info[0].name})
else:
saved_inputs = []
for saved_input in network.inputs:
saved_inputs.append(saved_input._info[0].name)
saved_file.update({"inputs": saved_inputs})
if not isinstance(network.outputs, list):
saved_file.update({"outputs": network.outputs._info[0].name})
else:
saved_outputs = []
for saved_output in network.outputs:
saved_outputs.append(saved_output._info[0].name)
saved_file.update({"outputs": saved_outputs})
saved_file.update({"config": network.config})

return saved_file
# def net2static_graph(network):
# saved_file = dict()
# # if network._NameNone is True:
# # saved_file.update({"name": None})
# # else:
# # saved_file.update({"name": network.name})
# # if not isinstance(network.inputs, list):
# # saved_file.update({"inputs": network.inputs._info[0].name})
# # else:
# # saved_inputs = []
# # for saved_input in network.inputs:
# # saved_inputs.append(saved_input._info[0].name)
# # saved_file.update({"inputs": saved_inputs})
# # if not isinstance(network.outputs, list):
# # saved_file.update({"outputs": network.outputs._info[0].name})
# # else:
# # saved_outputs = []
# # for saved_output in network.outputs:
# # saved_outputs.append(saved_output._info[0].name)
# # saved_file.update({"outputs": saved_outputs})
# saved_file.update({"config": network.config})
#
# return saved_file


@keras_export('keras.models.save_model')
@@ -149,7 +150,7 @@ def load_keras_model(model_config):
return model


def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False):
def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False, customized_data=None):
"""Save the architecture of TL model into a hdf5 file. Support saving model weights.

Parameters
@@ -160,6 +161,8 @@ def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False):
The name of model file.
save_weights : bool
Whether to save model weights.
customized_data : dict
The user customized meta data.

Examples
--------
@@ -177,11 +180,22 @@

logging.info("[*] Saving TL model into {}, saving weights={}".format(filepath, save_weights))

saved_file = net2static_graph(network)
saved_file_str = str(saved_file)
model_config = network.config # net2static_graph(network)
model_config_str = str(model_config)
customized_data_str = str(customized_data)
version_info = {
"tensorlayer_version": tl.__version__,
"backend": "tensorflow",
"backend_version": tf.__version__,
"training_device": "gpu",
"save_date": datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
}
version_info_str = str(version_info)

with h5py.File(filepath, 'w') as f:
f.attrs["model_structure"] = saved_file_str.encode('utf8')
f.attrs["model_config"] = model_config_str.encode('utf8')
f.attrs["customized_data"] = customized_data_str.encode('utf8')
f.attrs["version_info"] = version_info_str.encode('utf8')
if save_weights:
_save_weights_to_hdf5_group(f, network.all_layers)
f.flush()
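
For reference, a usage sketch of the extended `save_hdf5_graph` signature shown above; the model `net`, the file name, and the metadata keys are illustrative assumptions.

>>> import tensorlayer as tl
>>> # save architecture, weights and user-defined metadata into a single hdf5 file
>>> tl.files.save_hdf5_graph(net, filepath='model.hdf5', save_weights=True,
...                          customized_data={'dataset': 'mnist', 'n_epochs': 100})
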
@@ -237,29 +251,15 @@ def eval_layer(layer_kwargs):
raise RuntimeError("Unknown layer type.")


def static_graph2net(saved_file):
def static_graph2net(model_config):
layer_dict = {}
model_name = saved_file['name']
inputs_tensors = saved_file['inputs']
outputs_tensors = saved_file['outputs']
all_args = saved_file['config']
tf_version = saved_file['config'].pop(0)['tf_version']
tl_version = saved_file['config'].pop(0)['tl_version']
if tf_version != tf.__version__:
logging.warning(
"Saved model uses tensorflow version {}, but now you are using tensorflow version {}".format(
tf_version, tf.__version__
)
)
if tl_version != tl.__version__:
logging.warning(
"Saved model uses tensorlayer version {}, but now you are using tensorlayer version {}".format(
tl_version, tl.__version__
)
)
model_name = model_config["name"]
inputs_tensors = model_config["inputs"]
outputs_tensors = model_config["outputs"]
all_args = model_config["model_architecture"]
for idx, layer_kwargs in enumerate(all_args):
layer_class = layer_kwargs['class'] # class of current layer
prev_layers = layer_kwargs.pop('prev_layer') # name of previous layers
layer_class = layer_kwargs["class"] # class of current layer
prev_layers = layer_kwargs.pop("prev_layer") # name of previous layers
net = eval_layer(layer_kwargs)
if layer_class in tl.layers.inputs.__all__:
net = net._nodes[0].out_tensors[0]
@@ -312,11 +312,30 @@ def load_hdf5_graph(filepath='model.hdf5', load_weights=False):
- see ``tl.files.save_hdf5_graph``
"""
logging.info("[*] Loading TL model from {}, loading weights={}".format(filepath, load_weights))

f = h5py.File(filepath, 'r')
saved_file_str = f.attrs["model_structure"].decode('utf8')
saved_file = eval(saved_file_str)

M = static_graph2net(saved_file)
version_info_str = f.attrs["version_info"].decode('utf8')
version_info = eval(version_info_str)
backend_version = version_info["backend_version"]
tensorlayer_version = version_info["tensorlayer_version"]
if backend_version != tf.__version__:
logging.warning(
"Saved model uses tensorflow version {}, but now you are using tensorflow version {}".format(
backend_version, tf.__version__
)
)
if tensorlayer_version != tl.__version__:
logging.warning(
"Saved model uses tensorlayer version {}, but now you are using tensorlayer version {}".format(
tensorlayer_version, tl.__version__
)
)

model_config_str = f.attrs["model_config"].decode('utf8')
model_config = eval(model_config_str)

M = static_graph2net(model_config)
if load_weights:
if not ('layer_names' in f.attrs.keys()):
raise RuntimeError("Saved model does not contain weights.")
@@ -329,55 +348,55 @@ def load_hdf5_graph(filepath='model.hdf5', load_weights=False):
return M


def load_pkl_graph(name='model.pkl'):
"""Restore TL model archtecture from a a pickle file. No parameters be restored.

Parameters
-----------
name : str
The name of graph file.

Returns
--------
network : TensorLayer Model.

Examples
--------
>>> # It is better to use load_hdf5_graph
"""
logging.info("[*] Loading TL graph from {}".format(name))
with open(name, 'rb') as file:
saved_file = pickle.load(file)

M = static_graph2net(saved_file)

return M


def save_pkl_graph(network, name='model.pkl'):
"""Save the architecture of TL model into a pickle file. No parameters be saved.

Parameters
-----------
network : TensorLayer layer
The network to save.
name : str
The name of graph file.

Example
--------
>>> # It is better to use save_hdf5_graph
"""
if network.outputs is None:
raise AssertionError("save_graph not support dynamic mode yet")

logging.info("[*] Saving TL graph into {}".format(name))

saved_file = net2static_graph(network)

with open(name, 'wb') as file:
pickle.dump(saved_file, file, protocol=pickle.HIGHEST_PROTOCOL)
logging.info("[*] Saved graph")
# def load_pkl_graph(name='model.pkl'):
# """Restore TL model archtecture from a a pickle file. No parameters be restored.
#
# Parameters
# -----------
# name : str
# The name of graph file.
#
# Returns
# --------
# network : TensorLayer Model.
#
# Examples
# --------
# >>> # It is better to use load_hdf5_graph
# """
# logging.info("[*] Loading TL graph from {}".format(name))
# with open(name, 'rb') as file:
# saved_file = pickle.load(file)
#
# M = static_graph2net(saved_file)
#
# return M
#
#
# def save_pkl_graph(network, name='model.pkl'):
# """Save the architecture of TL model into a pickle file. No parameters be saved.
#
# Parameters
# -----------
# network : TensorLayer layer
# The network to save.
# name : str
# The name of graph file.
#
# Example
# --------
# >>> # It is better to use save_hdf5_graph
# """
# if network.outputs is None:
# raise AssertionError("save_graph not support dynamic mode yet")
#
# logging.info("[*] Saving TL graph into {}".format(name))
#
# saved_file = net2static_graph(network)
#
# with open(name, 'wb') as file:
# pickle.dump(saved_file, file, protocol=pickle.HIGHEST_PROTOCOL)
# logging.info("[*] Saved graph")


# Load dataset functions
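
And the matching load path for files written by the new `save_hdf5_graph`; the file name and `net` are illustrative. As the diff above shows, a mismatch between the stored `version_info` and the running TensorFlow/TensorLayer versions only produces a logged warning.

>>> import tensorlayer as tl
>>> # rebuild the model from the stored config, then restore its weights
>>> net = tl.files.load_hdf5_graph(filepath='model.hdf5', load_weights=True)
>>> net.eval()  # assuming the TL 2.x Model API: switch to inference mode before use
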
3 changes: 1 addition & 2 deletions tensorlayer/layers/convolution/binary_conv.py
@@ -75,11 +75,10 @@ def __init__(
in_channels=None,
name=None # 'binary_cnn2d',
):
super().__init__(name)
super().__init__(name, act=act)
self.n_filter = n_filter
self.filter_size = filter_size
self.strides = self._strides = strides
self.act = act
self.padding = padding
self.use_gemm = use_gemm
self.data_format = data_format
3 changes: 1 addition & 2 deletions tensorlayer/layers/convolution/deformable_conv.py
@@ -83,12 +83,11 @@ def __init__(
in_channels=None,
name=None # 'deformable_conv_2d',
):
super().__init__(name)
super().__init__(name, act=act)

self.offset_layer = offset_layer
self.n_filter = n_filter
self.filter_size = filter_size
self.act = act
self.padding = padding
self.W_init = W_init
self.b_init = b_init
3 changes: 1 addition & 2 deletions tensorlayer/layers/convolution/depthwise_conv.py
@@ -82,10 +82,9 @@ def __init__(
in_channels=None,
name=None # 'depthwise_conv2d'
):
super().__init__(name)
super().__init__(name, act=act)
self.filter_size = filter_size
self.strides = self._strides = strides
self.act = act
self.padding = padding
self.dilation_rate = self._dilation_rate = dilation_rate
self.data_format = data_format