Skip to content

Commit

Permalink
Mqcnn rts (#668)
Browse files Browse the repository at this point in the history
* Getting MQCNN up to date.

Co-authored-by: Jasper Schulz <schjaspe@amazon.de>
Co-authored-by: Aaron Spieler <aspiele@amazon.com>
Co-authored-by: Lorenzo Stella <lorenzostella@gmail.com>
  • Loading branch information
4 people authored May 18, 2020
1 parent 3aa027f commit 0d963f7
Show file tree
Hide file tree
Showing 26 changed files with 1,127 additions and 438 deletions.
11 changes: 6 additions & 5 deletions src/gluonts/block/decoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ def hybrid_forward(
pass


# TODO: add support for static variables at some point
class ForkingMLPDecoder(Seq2SeqDecoder):
"""
Multilayer perceptron decoder for sequence-to-sequence models.
Expand All @@ -76,7 +77,7 @@ def __init__(
self,
dec_len: int,
final_dim: int,
hidden_dimension_sequence: List[int] = list([]),
hidden_dimension_sequence: List[int] = [],
**kwargs,
) -> None:
super().__init__(**kwargs)
Expand Down Expand Up @@ -104,6 +105,7 @@ def __init__(
)
self.model.add(layer)

# TODO: add support for static input at some point
def hybrid_forward(
self, F, dynamic_input: Tensor, static_input: Tensor = None
) -> Tensor:
Expand All @@ -115,11 +117,10 @@ def hybrid_forward(
F
A module that can either refer to the Symbol API or the NDArray
API in MXNet.
dynamic_input
    dynamic_features, shape (batch_size, sequence_length, num_features) or
    (N, T, C), where sequence_length is equal to the encoder length, and
    num_features is equal to channel_seq[-1] for the MQCNN, for example.
static_input
not used in this decoder.
Expand Down
83 changes: 66 additions & 17 deletions src/gluonts/block/enc2dec.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def hybrid_forward(
F,
encoder_output_static: Tensor,
encoder_output_dynamic: Tensor,
future_features: Tensor,
future_features_dynamic: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
"""
Parameters
Expand All @@ -48,40 +48,36 @@ def hybrid_forward(
shape (batch_size, num_features) or (N, C)
encoder_output_dynamic
    shape (batch_size, sequence_length, num_features) or (N, T, C)
future_features_dynamic
    shape (batch_size, sequence_length, prediction_length, num_features) or (N, T, P, C')
Returns
-------
Tensor
shape (batch_size, num_features) or (N, C)
Tensor
    shape (batch_size, num_features) or (N, C)
Tensor
    shape (batch_size, sequence_length, num_features) or (N, T, C)
"""
pass


class PassThroughEnc2Dec(Seq2SeqEnc2Dec):
"""
Simplest class for passing encoder tensors to the decoder. Passes through
tensors, except that future_features_dynamic is dropped.
"""

def hybrid_forward(
self,
F,
encoder_output_static: Tensor,
encoder_output_dynamic: Tensor,
future_features: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
future_features_dynamic: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
Expand All @@ -90,10 +86,10 @@ def hybrid_forward(
shape (batch_size, num_features) or (N, C)
encoder_output_dynamic
    shape (batch_size, sequence_length, num_features) or (N, T, C)
future_features_dynamic
    shape (batch_size, sequence_length, prediction_length, num_features) or (N, T, P, C')
Returns
Expand All @@ -102,10 +98,63 @@ def hybrid_forward(
shape (batch_size, num_features) or (N, C)
Tensor
    shape (batch_size, prediction_length, num_features_02) or (N, T, C)
"""
return encoder_output_static, encoder_output_dynamic

Tensor

class FutureFeatIntegratorEnc2Dec(Seq2SeqEnc2Dec):
    """
    Integrates the encoder_output_dynamic and future_features_dynamic into
    one tensor and passes them through as the dynamic input to the decoder.
    """

    def hybrid_forward(
        self,
        F,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features_dynamic: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, sequence_length, num_features) or (N, T, C)
        future_features_dynamic
            shape (batch_size, sequence_length, prediction_length,
            num_features) or (N, T, P, C')

        Returns
        -------
        Tensor
            static decoder input, shape (batch_size, num_features) or (N, C)
        Tensor
            dynamic decoder input, shape (batch_size, sequence_length,
            num_features + prediction_length * num_future_features)
            or (N, T, C'')
        """
        # Flatten the last two axes of the future features:
        # (N, T, P, C) => (N, T, P * C). In MXNet reshape semantics,
        # shape=(0, 0, -1) copies the first two input dimensions unchanged
        # and infers the last one, merging the remaining axes.
        future_features_dynamic = F.reshape(
            future_features_dynamic, shape=(0, 0, -1)
        )

        # Concatenate the encoder's dynamic output with the flattened future
        # features along the channel axis, so the decoder receives a single
        # dynamic input tensor:
        # => (batch_size, sequence_length,
        #     num_dec_input_dynamic + num_future_feat_dynamic)
        total_dec_input_dynamic = F.concat(
            encoder_output_dynamic, future_features_dynamic, dim=2
        )

        return (
            encoder_output_static,
            total_dec_input_dynamic,
        )
Loading

0 comments on commit 0d963f7

Please sign in to comment.