Skip to content

Commit

Permalink
use tensor.shape but not paddle.shape(tensor) (#690)
Browse files Browse the repository at this point in the history
* use tensor.shape but not paddle.shape(tensor)

* refine
  • Loading branch information
wanghuancoder authored Apr 12, 2024
1 parent ed245e9 commit 4bee9be
Show file tree
Hide file tree
Showing 5 changed files with 10 additions and 10 deletions.
6 changes: 3 additions & 3 deletions paddlevideo/modeling/backbones/swin_transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
Expand Down Expand Up @@ -317,7 +317,7 @@ def __init__(self,
drop=drop)

def forward_part1(self, x, mask_matrix):
B = paddle.shape(x)[0]
B = x.shape[0]
_, D, H, W, C = x.shape
window_size, shift_size = get_window_size((D, H, W), self.window_size,
self.shift_size)
Expand Down Expand Up @@ -513,7 +513,7 @@ def forward(self, x):
x: Input feature, tensor size (B, C, D, H, W).
"""
# calculate attention mask for SW-MSA
B = paddle.shape(x)[0]
B = x.shape[0]
_, C, D, H, W = x.shape
window_size, shift_size = get_window_size((D, H, W), self.window_size,
self.shift_size)
Expand Down
4 changes: 2 additions & 2 deletions paddlevideo/modeling/backbones/toshift_vit.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
Expand Down Expand Up @@ -374,7 +374,7 @@ def _init_fn(self, m):

def forward_features(self, x):
# B = x.shape[0]
B = paddle.shape(x)[0]
B = x.shape[0]
x, T, W = self.patch_embed(x) # [BT,nH*nW,F]
cls_tokens = self.cls_token.expand((B * T, -1, -1)) # [1,1,F]->[BT,1,F]
x = paddle.concat((cls_tokens, x), axis=1)
Expand Down
4 changes: 2 additions & 2 deletions paddlevideo/modeling/backbones/vit.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype)
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
Expand Down Expand Up @@ -394,7 +394,7 @@ def _init_fn(self, m):

def forward_features(self, x):
# B = x.shape[0]
B = paddle.shape(x)[0]
B = x.shape[0]
x, T, W = self.patch_embed(x) # [BT,nH*nW,F]
cls_tokens = self.cls_token.expand((B * T, -1, -1)) # [1,1,F]->[BT,1,F]
x = paddle.concat((cls_tokens, x), axis=1)
Expand Down
4 changes: 2 additions & 2 deletions paddlevideo/modeling/backbones/vit_tweaks.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
Expand Down Expand Up @@ -444,7 +444,7 @@ def _init_fn(self, m):

def forward_features(self, x):
# B = x.shape[0]
B = paddle.shape(x)[0]
B = x.shape[0]
x, T, W = self.patch_embed(x) # [BT,nH*nW,F]
cls_tokens = self.cls_token.expand((B * T, -1, -1)) # [1,1,F]->[BT,1,F]
x = paddle.concat((cls_tokens, x), axis=1)
Expand Down
2 changes: 1 addition & 1 deletion paddlevideo/modeling/heads/i3d_head.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def forward(self, x):
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
N = paddle.shape(x)[0]
N = x.shape[0]
x = x.reshape([N, -1])
# [N, in_channels]
cls_score = self.fc(x)
Expand Down

0 comments on commit 4bee9be

Please sign in to comment.