diff --git a/README.md b/README.md
index a8a6d856..dff3ce44 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,7 @@ pip install -e .
 ```python
 >> import ampligraph
 >> ampligraph.__version__
-'1.3.0'
+'1.3.1'
 ```
 
 
diff --git a/ampligraph/__init__.py b/ampligraph/__init__.py
index 48eee913..04987f05 100644
--- a/ampligraph/__init__.py
+++ b/ampligraph/__init__.py
@@ -12,7 +12,7 @@
 import tensorflow as tf
 tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
 
-__version__ = '1.3.0'
+__version__ = '1.3.1'
 __all__ = ['datasets', 'latent_features', 'discovery', 'evaluation', 'utils']
 
 logging.config.fileConfig(pkg_resources.resource_filename(__name__, 'logger.conf'), disable_existing_loggers=False)
diff --git a/ampligraph/evaluation/protocol.py b/ampligraph/evaluation/protocol.py
index b07a5f6c..2ee85a42 100644
--- a/ampligraph/evaluation/protocol.py
+++ b/ampligraph/evaluation/protocol.py
@@ -492,6 +492,7 @@ def evaluate_performance(X, model, filter_triples=None, verbose=False, filter_un
             * We compute the rank of the test triple by comparing against ALL the corruptions.
             * We then compute the number of False negatives that are ranked higher than the test triple; and then
               subtract this value from the above computed rank to yield the final filtered rank.
+
             **Execution Time:** This method takes ~4 minutes on FB15K using ComplEx
             (Intel Xeon Gold 6142, 64 GB Ubuntu 16.04 box, Tesla V100 16GB)
 
@@ -510,9 +511,8 @@ def evaluate_performance(X, model, filter_triples=None, verbose=False, filter_un
         - 's': corrupt only subject.
         - 'o': corrupt only object.
         - 's+o': corrupt both subject and object.
-        - 's,o': corrupt subject and object sides independently and return 2 ranks. This corresponds to the
-                 evaluation protocol used in literature, where head and tail corruptions are evaluated
-                 separately.
+        - 's,o': corrupt subject and object sides independently and return 2 ranks. This corresponds to the \
+        evaluation protocol used in literature, where head and tail corruptions are evaluated separately.
 
         .. note::
             When ``corrupt_side='s,o'`` the function will return 2*n ranks as a [n, 2] array.
@@ -520,7 +520,6 @@ def evaluate_performance(X, model, filter_triples=None, verbose=False, filter_un
             The second column of the array represents the object corruptions.
             Otherwise, the function returns n ranks as [n] array.
 
-
     use_default_protocol: bool
         Flag to indicate whether to use the standard protocol used in literature defined in
         :cite:`bordes2013translating` (default: False).
diff --git a/ampligraph/latent_features/models/ConvE.py b/ampligraph/latent_features/models/ConvE.py
index a916905e..da21cbfb 100644
--- a/ampligraph/latent_features/models/ConvE.py
+++ b/ampligraph/latent_features/models/ConvE.py
@@ -554,7 +554,7 @@ def fit(self, X, early_stopping=False, early_stopping_params={}):
         """Train a ConvE (with optional early stopping).
 
         The model is trained on a training set X using the training protocol
-        described in :cite:`Dettmers2016`.
+        described in :cite:`DettmersMS018`.
 
         Parameters
         ----------
@@ -737,7 +737,7 @@ def fit(self, X, early_stopping=False, early_stopping_params={}):
             raise e
 
     def _initialize_eval_graph(self, mode='test'):
-        """ Initialize the 1-N evaluation graph with the set protocol.
+        """ Initialize the evaluation graph with the set protocol.
 
         Parameters
         ----------
@@ -956,7 +956,7 @@ def get_ranks(self, dataset_handle):
             logger.error(msg)
             raise RuntimeError(msg)
 
-        eval_protocol = self.eval_config.get('corrupt_side', constants.DEFAULT_PROTOCOL_EVAL)
+        eval_protocol = self.eval_config.get('corrupt_side', constants.DEFAULT_CORRUPT_SIDE_EVAL)
 
         if 'o' in eval_protocol:
             object_ranks = self._get_object_ranks(dataset_handle)
diff --git a/docs/changelog.md b/docs/changelog.md
index e6732017..125f1f31 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,5 +1,11 @@
 # Changelog
 
+## 1.3.1 
+**18 Mar 2020**
+
+- Minor bug fix in ConvE (#189)
+
+
 ## 1.3.0 
 **9 Mar 2020**
 
diff --git a/docs/index.rst b/docs/index.rst
index f081fcd5..0d83d170 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -62,7 +62,7 @@ Modules
 AmpliGraph includes the following submodules:
 
 * **Datasets**: helper functions to load datasets (knowledge graphs).
-* **Models**: knowledge graph embedding models. AmpliGraph contains **TransE**, **DistMult**, **ComplEx**, **HolE**, **ConvKB** (More to come!)
+* **Models**: knowledge graph embedding models. AmpliGraph contains **TransE**, **DistMult**, **ComplEx**, **HolE**, **ConvE**, **ConvKB** (More to come!)
 * **Evaluation**: metrics and evaluation protocols to assess the predictive power of the models.
 * **Discovery**: High-level convenience APIs for knowledge discovery (discover new facts, cluster entities, predict near duplicates).
 
diff --git a/docs/install.md b/docs/install.md
index ba6c8799..18414f57 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -66,5 +66,5 @@ pip install -e .
 ```python
 >> import ampligraph
 >> ampligraph.__version__
-'1.3.0'
+'1.3.1'
 ```
diff --git a/tests/ampligraph/utils/test_model_utils.py b/tests/ampligraph/utils/test_model_utils.py
index b235f7df..9f96cbd0 100644
--- a/tests/ampligraph/utils/test_model_utils.py
+++ b/tests/ampligraph/utils/test_model_utils.py
@@ -12,8 +12,9 @@
 import numpy.testing as npt
 from ampligraph.utils import save_model, restore_model, create_tensorboard_visualizations, \
                              write_metadata_tsv, dataframe_to_triples
+from ampligraph.latent_features import TransE
 import pytest
-import pickle
+
 
 def test_save_and_restore_model():
 
@@ -72,14 +73,27 @@ def test_restore_model_errors():
 
 
 def test_create_tensorboard_visualizations():
-    # TODO: This
-    pass
+    # test if tensorflow API are still operative
+
+    X = np.array([['a', 'y', 'b'],
+                  ['b', 'y', 'a'],
+                  ['a', 'y', 'c'],
+                  ['c', 'y', 'a'],
+                  ['a', 'y', 'd'],
+                  ['c', 'y', 'd'],
+                  ['b', 'y', 'c'],
+                  ['f', 'y', 'e']])
+    model = TransE(batches_count=1, seed=555, epochs=20, k=10, loss='pairwise',
+                   loss_params={'margin': 5})
+    model.fit(X)
+    create_tensorboard_visualizations(model, 'tensorboard_files')
 
 
 def test_write_metadata_tsv():
     # TODO: This
     pass
 
+
 def test_dataframe_to_triples():
     X = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv')
     schema = [('species', 'has_sepal_length', 'sepal_length')]