-
Notifications
You must be signed in to change notification settings - Fork 1
/
pdOptOneDim.py
90 lines (62 loc) · 2.01 KB
/
pdOptOneDim.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
#An optimization based modification of a time series through TDE representation.
import torch
from common import convertTde
from common import compPairDist
from common import tgtRipsPdFromTimeSeries
from common import gudhiToTensorList
import numpy as np
from tdeLayerOne import tdeLayerOne
from elemProdLayer import elemProdLayer
from pairDistLayer import pairDistLayer
from ripsLayerOneDim import ripsLayer
from various_func_grad import comp2WassOneDim
#device = torch.device('cpu')
# Prefer the GPU when one is present; fall back to CPU so the script still
# runs on machines without an NVIDIA GPU / CUDA install.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
N = 100            # length of the time series
pcDim = 3          # dimension of the time-delay-embedding (TDE) point cloud
homDim = 0         # homology dimension optimized through the Rips layer
homDimList = [0]   # homology dimensions requested from gudhi
strictDim = [0]    # NOTE(review): unused in this script — confirm before removing
maxEdgeLen = 10.   # maximum filtration (edge-length) value for the Rips complex
oneVec = torch.ones(N, device=device, requires_grad=False)
# Target: the persistence diagram of a random time series' TDE point cloud.
tgtTS = np.random.randn(N)
print(tgtTS)
tgtPC = torch.tensor(convertTde(tgtTS), dtype=torch.float, requires_grad=False)
tgtPDGudhi = tgtRipsPdFromTimeSeries(tgtTS, pcDim, homDimList, maxEdgeLen)
tgtPDList = gudhiToTensorList(tgtPDGudhi, homDimList, maxEdgeLen)
# Dimension-0 diagram. NOTE(review): tgtPD lives on CPU while the variable
# diagram is computed from a `device` tensor — confirm comp2WassOneDim
# tolerates mixed devices when running on CUDA.
tgtPD = tgtPDList[0]
# Optimization variable: a random time series refined by gradient descent.
reqTS = torch.randn(len(tgtTS), device=device, requires_grad=True)
stepSiz = 1e-2     # gradient-descent step size
myRipsLayerApply = ripsLayer.apply
myPairDistLayer = pairDistLayer(compPairDist)
myTdeLayer = tdeLayerOne()
myElemProdLayer = elemProdLayer()
# Gradient-descent loop: deform reqTS so that the persistence diagram of its
# TDE point cloud approaches the target diagram tgtPD in 2-Wasserstein
# distance. (Indentation restored — the pasted source had lost it.)
for t in range(1000):
    # Elementwise product with the all-ones vector is an identity transform
    # that keeps scaleTS inside the autograd graph rooted at reqTS.
    scaleTS = myElemProdLayer(reqTS, oneVec)
    # Time-delay embedding: 1-D series -> point cloud.
    varPC = myTdeLayer(scaleTS)
    # Pairwise distances of the point cloud, input to the Rips filtration.
    pairDistVec = myPairDistLayer(varPC)
    # Persistence diagram of the Rips complex in homology dimension homDim.
    varPD = myRipsLayerApply(pairDistVec, homDim, maxEdgeLen)
    # 2-Wasserstein distance between the variable and target diagrams.
    loss = comp2WassOneDim(varPD, tgtPD)
    print('iteration %d loss %f' % (t, loss.item()))
    loss.backward()
    # Plain gradient-descent update performed outside autograd; gradients
    # must be zeroed manually since no optimizer object is used.
    with torch.no_grad():
        reqTS -= stepSiz*reqTS.grad
        reqTS.grad.zero_()