# learn_shapespace_and_AE.py
from shapemaker import *

####################
# Settings #########
####################

# Training parameters
NUM_TRAINING_SESSIONS = 70000
START_LEARNING_RATE = 0.01
PATIENCE = 1500
NUM_NODES = 512
FOURIER_FEATURES = True
SIGMA = 5.0
MONTE_CARLO_SAMPLES = 2000
SHAPES_EACH_STEP = 16
EPSILON = 0.0001
CONSTANT = 40.0 if FOURIER_FEATURES else 10.0

# Network design
FEATURE_DIMENSION = 12
SIZE_POINTCLOUD = 23725
TOTAL_SHAPES = 100
####################
# Main #############
####################

# Build the point cloud autoencoder
autoencoder = PointNetAutoEncoder(3, SIZE_POINTCLOUD, FEATURE_DIMENSION)
autoencoder.to(device)

# Load dataset
dataset = np.load(open(r"dataset/dataset_faces100.npy", "rb"), allow_pickle=True)

# Set up the shape space learning network
network = FeatureSpaceNetwork2(3, [520] * 7, [4], FourierFeatures=FOURIER_FEATURES, num_features=8,
                               sigma=SIGMA, feature_space=FEATURE_DIMENSION, geometric_init=False)
network.to(device)

# Optimize both networks jointly with a single Adam optimizer
all_params = chain(network.parameters(), autoencoder.parameters())
optimizer = torch.optim.Adam(all_params, START_LEARNING_RATE)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=PATIENCE, verbose=False)

# Check that both networks are really trained at the same time | Part 1
# print(autoencoder(Variable(Tensor(np.array([np.array(dataset[1][0]).T])), requires_grad=True).to(device)))
for i in range(NUM_TRAINING_SESSIONS + 1):
    network.zero_grad()
    autoencoder.zero_grad()
    loss = 0.0

    # Sample a random batch of shapes for this training step
    shape_batch = np.random.choice(TOTAL_SHAPES, SHAPES_EACH_STEP, replace=False)
    for index in shape_batch:
        shape = dataset[index][0]  # [:, :num_points]
        pointcloud = Variable(Tensor(shape), requires_grad=False).to(device)
        cloudT = Tensor(np.array([np.array(shape).T]))
        pointcloudT = Variable(cloudT, requires_grad=True).to(device)

        # Encode the point cloud; the latent code conditions the shape space network
        rec, latent = autoencoder(pointcloudT)
        latent = torch.ravel(latent)
        loss += AT_loss_shapespace2(network, pointcloud, EPSILON, MONTE_CARLO_SAMPLES, CONSTANT, latent)

    if i % 10 == 0:
        report_progress(i, NUM_TRAINING_SESSIONS, loss.detach().cpu().numpy())

    # Backpropagation
    loss.backward()
    optimizer.step()
    scheduler.step(loss)

# Check that both networks are really trained at the same time | Part 2
# print(autoencoder(Variable(Tensor(np.array([np.array(dataset[1][0]).T])), requires_grad=True).to(device)))
# Save the trained networks
torch.save(network.state_dict(), r"models/face_space5.pth")
torch.save(autoencoder.state_dict(), r"models/face_ae5.pth")
print("Finished")