
Commit b448a61

test(tailor): assure weights are preserved after calling to_embedding_model (#106)

* test(tailor): assure weights are preserved

* test(tailor): add keras weight test

* test(tailor): add keras weights test
bwanglzu authored Oct 8, 2021
1 parent a589a01 commit b448a61
Showing 3 changed files with 22 additions and 5 deletions.
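All three tests follow the same pattern: snapshot a layer's weights as NumPy arrays, convert the model with to_embedding_model, and assert that the snapshot still matches afterwards. A minimal standalone sketch of that pattern in plain Keras (no finetuner involved; the toy model and layer names below are made up for illustration):

import numpy as np
import tensorflow as tf

# A tiny stand-in model; 'fc' mimics the bottleneck layer the tests cut at.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,), name='fc'),
    tf.keras.layers.Dense(2, name='head'),
])

# Snapshot the first layer's weights before any model surgery.
weights_before = model.layers[0].get_weights()

# Rebuild the model up to the 'fc' layer -- a crude stand-in for what a
# Tailor's to_embedding_model does.
embed_model = tf.keras.Model(inputs=model.input,
                             outputs=model.get_layer('fc').output)

# The surviving layer must still hold exactly the same weights.
for before, after in zip(weights_before,
                         embed_model.get_layer('fc').get_weights()):
    np.testing.assert_array_equal(before, after)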
15 changes: 12 additions & 3 deletions tests/unit/tailor/test_keras.py
@@ -1,5 +1,6 @@
 import pytest
 import tensorflow as tf
+import numpy as np
 
 from finetuner.tailor.keras import KerasTailor
 
@@ -121,10 +122,18 @@ def test_trim_fail_given_unexpected_layer_name(model, layer_name):
     ],
     indirect=['model'],
 )
-def test_trim(model, layer_name, expected_output_shape):
+def test_to_embedding_model(model, layer_name, expected_output_shape):
     keras_tailor = KerasTailor(model)
-    embed_model = keras_tailor.to_embedding_model(layer_name=layer_name)
-    assert embed_model.output_shape == expected_output_shape
+    model = keras_tailor.to_embedding_model(layer_name=layer_name)
+    assert model.output_shape == expected_output_shape
+
+
+def test_weights_preserved_given_pretrained_model(vgg16_cnn_model):
+    weights = vgg16_cnn_model.layers[0].get_weights()
+    keras_tailor = KerasTailor(vgg16_cnn_model)
+    vgg16_cnn_model = keras_tailor.to_embedding_model(layer_name='fc2')
+    weights_after_convert = vgg16_cnn_model.layers[0].get_weights()
+    np.testing.assert_array_equal(weights, weights_after_convert)
 
 
 @pytest.mark.parametrize(
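The vgg16_cnn_model fixture is not part of this diff. For the new test to find an 'fc2' layer, it presumably returns a stock VGG16 with its classifier head, whose dense layers Keras names fc1, fc2 and predictions. A hypothetical fixture compatible with that test might look like this (weights=None is only an assumption, to keep CI from downloading ImageNet weights):

import pytest
import tensorflow as tf


@pytest.fixture
def vgg16_cnn_model():
    # include_top=True keeps the dense head, so a layer named 'fc2' exists.
    return tf.keras.applications.VGG16(weights=None, include_top=True)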
6 changes: 5 additions & 1 deletion tests/unit/tailor/test_paddle.py
@@ -1,6 +1,7 @@
 import paddle
 import paddle.nn as nn
 import pytest
+import numpy as np
 
 from finetuner.tailor.paddle import PaddleTailor
 
@@ -217,15 +218,18 @@ def test_freeze(model, layer_name, input_size, input_dtype, freeze):
     ],
     indirect=['model'],
 )
-def test_trim(
+def test_to_embedding_model(
     model, layer_name, input_size, input_, input_dtype, expected_output_shape
 ):
+    weight = model.parameters()[0].numpy()  # weight of the 0th layer
     paddle_tailor = PaddleTailor(
         model=model,
         input_size=input_size,
         input_dtype=input_dtype,
     )
     model = paddle_tailor.to_embedding_model(freeze=False, layer_name=layer_name)
+    weight_after_convert = model.parameters()[0].numpy()
+    np.testing.assert_array_equal(weight, weight_after_convert)
     out = model(paddle.cast(paddle.rand(input_), input_dtype))
     assert list(out.shape) == expected_output_shape
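In the Paddle version, parameters() returns the model's parameter tensors in order, and .numpy() gives a NumPy snapshot of each one. Note that unlike KerasTailor, PaddleTailor also takes input_size and input_dtype, presumably so it can run a forward pass to discover the layer structure. A self-contained sketch of the same snapshot-convert-compare pattern on a toy network (pure Paddle, no finetuner; the conversion step is elided):

import numpy as np
import paddle.nn as nn

# A toy network standing in for the parametrized test models.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# parameters()[0] is the first Linear layer's weight matrix.
weight = model.parameters()[0].numpy()

# ... a conversion that must keep weights intact would happen here ...

weight_after_convert = model.parameters()[0].numpy()
np.testing.assert_array_equal(weight, weight_after_convert)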
6 changes: 5 additions & 1 deletion tests/unit/tailor/test_torch.py
@@ -1,6 +1,7 @@
 import pytest
 import torch
 import torch.nn as nn
+import numpy as np
 
 from finetuner.tailor.pytorch import PytorchTailor
 
@@ -187,9 +188,10 @@ def test_trim_fail_given_unexpected_layer_idx(
     ],
     indirect=['model'],
 )
-def test_trim(
+def test_to_embedding_model(
     model, layer_name, input_size, input_, input_dtype, expected_output_shape
 ):
+    weights = list(model.parameters())[0].detach().numpy()  # weights of the first layer
     pytorch_tailor = PytorchTailor(
         model=model,
         input_size=input_size,
@@ -199,6 +201,8 @@ def test_trim(
         freeze=False,
         layer_name=layer_name,
     )
+    weights_after_convert = list(model.parameters())[0].detach().numpy()
+    np.testing.assert_array_equal(weights, weights_after_convert)
     input_ = torch.rand(input_)
     if input_dtype == 'int64':
         input_ = input_.type(torch.LongTensor)
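One subtlety in the torch version: .detach().numpy() does not copy, it aliases the parameter's storage, so if a conversion ever modified weights in place, the "before" snapshot would drift along with them and the equality assertion could pass vacuously. A short standalone demo of the aliasing, with .copy() as the defensive variant (toy model, unrelated to finetuner):

import numpy as np
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Aliased view: shares memory with the live parameter.
aliased = list(model.parameters())[0].detach().numpy()
# Defensive snapshot: an independent copy, frozen at this point in time.
snapshot = list(model.parameters())[0].detach().numpy().copy()

with torch.no_grad():
    list(model.parameters())[0].add_(1.0)  # mutate the weights in place

current = list(model.parameters())[0].detach().numpy()
assert np.array_equal(aliased, current)       # the alias moved with the weights
assert not np.array_equal(snapshot, current)  # the copy did not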
