Starting Training Loop...
Traceback (most recent call last):
File "D:\project_python\doctor_project\LYC\20241130\222.py", line 42, in
wt.fit(X=X, lr=1e-2, num_epochs=10)
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\awave\transform.py", line 86, in fit
trainer(train_loader, epochs=num_epochs)
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\awave\utils\train.py", line 84, in call
mean_epoch_loss = self._train_epoch(train_loader, epoch)
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\awave\utils\train.py", line 110, in train_epoch
iter_loss = self.train_iteration(data)
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\awave\utils\train.py", line 158, in train_iteration
loss.backward()
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\torch_tensor.py", line 525, in backward
torch.autograd.backward(
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\torch\autograd_init.py", line 260, in backward
grad_tensors = make_grads(tensors, grad_tensors, is_grads_batched=False)
File "C:\ProgramData\miniconda3\envs\py10\lib\site-packages\torch\autograd_init.py", line 141, in _make_grads
raise RuntimeError(msg)
RuntimeError: grad can be implicitly created only for real scalar outputs but got torch.complex64
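For context, PyTorch only creates the implicit gradient for `backward()` when the output is a real scalar, so a `torch.complex64` loss raises exactly this error. The snippet below is a minimal standalone illustration, not awave code: it reproduces the error with a toy complex loss and shows that reducing the loss to a real scalar (e.g. via `.real` or `.abs()`) before calling `backward()` avoids it.

```python
import torch

# Toy complex-valued "loss" to reproduce the error (not awave's actual loss).
z = torch.randn(4, dtype=torch.complex64, requires_grad=True)
loss = (z * z.conj()).sum()   # dtype is still complex64, even though the imaginary part is ~0

# loss.backward()             # RuntimeError: grad can be implicitly created
#                             # only for real scalar outputs but got torch.complex64

# Reducing the loss to a real scalar lets autograd create the implicit gradient.
loss.real.backward()          # loss.abs().backward() would also work
print(z.grad)                 # complex64 gradient w.r.t. z
```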