Commit fe6d374

Author: Francisco Santos (committed)
Use BaseModel variables
1 parent 95bbab0 · commit fe6d374

File tree

1 file changed: +14 -14 lines

  • src/ydata_synthetic/synthesizers/regular/pategan


src/ydata_synthetic/synthesizers/regular/pategan/model.py

Lines changed: 14 additions & 14 deletions
@@ -62,7 +62,7 @@ def _moments_acc(n_teachers, votes, lap_scale, l_list):
     update = []
     for l in l_list:
         clip = 2 * square(lap_scale) * l * (l + 1)
-        t = (1 - q) * pow((1 - q) / (1 - exp(2*lap_scale) * q), l) + q * exp(2 * lap_scale * l)
+        t = (1 - q) * pow((1 - q) / (1 - exp(2 * lap_scale) * q), l) + q * exp(2 * lap_scale * l)
         update.append(reduce_sum(clip_by_value(t, clip_value_min=-clip, clip_value_max=clip)))
     return cast(update, dtype=float64)

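For readers checking the math in this hunk: only the spacing of the changed line differs, but the per-order moments-accountant term it computes is easier to inspect outside the TensorFlow ops. A minimal NumPy sketch of the same bound, assuming q is the vote-disagreement probability computed earlier in _moments_acc (the names below are stand-ins, not the repository's API):

    import numpy as np

    def moments_update(q, lap_scale, l_list):
        # Mirrors _moments_acc: for each moment order l, compute the
        # data-dependent bound t and clip it to 2 * lap_scale^2 * l * (l + 1).
        updates = []
        for l in l_list:
            clip = 2 * np.square(lap_scale) * l * (l + 1)
            t = (1 - q) * ((1 - q) / (1 - np.exp(2 * lap_scale) * q)) ** l \
                + q * np.exp(2 * lap_scale * l)
            updates.append(np.sum(np.clip(t, -clip, clip)))
        return np.asarray(updates, dtype=np.float64)
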
@@ -121,23 +121,23 @@ def train(self, data, class_ratios, train_arguments: TrainParameters, num_cols:
         for i in range(self.n_teachers):
             inputs, categories = None, None
             for b, data_ in enumerate(train_loader[i]):
-                inputs, categories = data_, b
+                inputs, categories = data_, b  # categories = 0, data_ holds the first batch, why do we do this?
                 # categories stays at zero because the loop breaks after the first iteration
                 # inputs holds only the first batch of data
                 break
 
             with GradientTape() as disc_tape:
                 # train with real
-                dis_data = concat([inputs, zeros([inputs.shape[0], 1], dtype=float64)], 1)
+                dis_data = concat([inputs, zeros((self.batch_size, 1), dtype=float64)], 1)  # Why do we append a column of zeros instead of categories?
                 # print("1st batch data", dis_data.shape)
                 real_output = self.t_discriminators[i](dis_data, training=True)
                 # print(real_output.shape, tf.ones.shape)
 
                 # train with fake
-                z = uniform([inputs.shape[0], self.z_dim], dtype=float64)
+                z = uniform([self.batch_size, self.noise_dim], dtype=float64)
                 # print("uniformly distributed noise", z.shape)
 
-                sample = expand_dims(category_samples.sample(inputs.shape[0]), axis=1)
+                sample = expand_dims(category_samples.sample(self.batch_size), axis=1)
                 # print("category", sample.shape)
 
                 fake = self.generator(concat([z, sample], 1))

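The substitutions above read the batch size and noise dimension from the shared BaseModel attributes (self.batch_size, self.noise_dim) instead of inferring them from tensor shapes, which implicitly assumes every teacher batch holds exactly self.batch_size records. A self-contained sketch of how the conditional generator input is assembled, with hypothetical stand-in values for the model attributes and for category_samples:

    import tensorflow as tf

    batch_size, noise_dim = 64, 32                       # hypothetical stand-ins for the BaseModel attributes
    class_ratios = tf.constant([0.7, 0.3], tf.float64)   # hypothetical class distribution

    z = tf.random.uniform([batch_size, noise_dim], dtype=tf.float64)
    # stand-in for category_samples.sample(batch_size): draw class ids according to class_ratios
    sample = tf.random.categorical(tf.math.log(class_ratios)[None, :], batch_size)
    sample = tf.cast(tf.reshape(sample, [batch_size, 1]), tf.float64)
    gen_input = tf.concat([z, sample], 1)                # shape (batch_size, noise_dim + 1), fed to the generator
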
@@ -153,16 +153,16 @@ def train(self, data, class_ratios, train_arguments: TrainParameters, num_cols:
                 disc_loss = real_loss_disc + fake_loss_disc
                 # print(disc_loss, real_loss_disc, fake_loss_disc)
 
-            gradients_of_discriminator = disc_tape.gradient(disc_loss, self.t_discriminators[i].trainable_variables)
+            disc_grad = disc_tape.gradient(disc_loss, self.t_discriminators[i].trainable_variables)
             # print(gradients_of_discriminator)
 
-            disc_opt_t[i].apply_gradients(zip(gradients_of_discriminator, self.t_discriminators[i].trainable_variables))
+            disc_opt_t[i].apply_gradients(zip(disc_grad, self.t_discriminators[i].trainable_variables))
 
         # train the student discriminator
         for t_3 in range(train_arguments.num_student_iters):
-            z = uniform([inputs.shape[0], self.z_dim], dtype=float64)
+            z = uniform([self.batch_size, self.noise_dim], dtype=float64)
 
-            sample = expand_dims(category_samples.sample(inputs.shape[0]), axis=1)
+            sample = expand_dims(category_samples.sample(self.batch_size), axis=1)
             # print("category_stu", sample.shape)
 
             with GradientTape() as stu_tape:

@@ -185,9 +185,9 @@ def train(self, data, class_ratios, train_arguments: TrainParameters, num_cols:
             disc_opt_stu.apply_gradients(zip(gradients_of_stu, self.s_discriminator.trainable_variables))
 
         # train the generator
-        z = uniform([inputs.shape[0], self.z_dim], dtype=float64)
+        z = uniform([self.batch_size, self.noise_dim], dtype=float64)
 
-        sample_g = expand_dims(category_samples.sample(inputs.shape[0]), axis=1)
+        sample_g = expand_dims(category_samples.sample(self.batch_size), axis=1)
 
         with GradientTape() as gen_tape:
             fake = self.generator(concat([z, sample_g], 1))

@@ -198,8 +198,8 @@ def train(self, data, class_ratios, train_arguments: TrainParameters, num_cols:
         generator_optimizer.apply_gradients(zip(gradients_of_generator, self.generator.trainable_variables))
 
         # Calculate the current privacy cost
-        epsilon = min((alpha - log(self.delta)) / l_list)
-        if steps % 1 == 0:
+        epsilon = min((alpha - log(train_arguments.delta)) / l_list)
+        if steps % train_arguments.sample_interval == 0:
             print("Step : ", steps, "Loss SD : ", stu_loss, "Loss G : ", loss_gen, "Epsilon : ", epsilon)
 
         steps += 1

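The epsilon computed here follows the usual moments-to-(epsilon, delta) conversion: for each tracked order l, epsilon_l = (alpha_l - log(delta)) / l, and the reported value is the minimum over orders; the change just sources delta and the logging interval from train_arguments instead of hard-coded values. A small sketch of that conversion, assuming alpha is the vector of accumulated log-moments aligned with l_list (names are illustrative, not the repository's API):

    import numpy as np

    def epsilon_from_moments(alpha, l_list, delta):
        # Tightest (epsilon, delta) guarantee implied by the accumulated moments:
        # epsilon_l = (alpha_l - log(delta)) / l, minimised over the tracked orders l.
        alpha = np.asarray(alpha, dtype=np.float64)
        l_list = np.asarray(l_list, dtype=np.float64)
        return float(np.min((alpha - np.log(delta)) / l_list))
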
@@ -208,7 +208,7 @@ def train(self, data, class_ratios, train_arguments: TrainParameters, num_cols:
     def _pate_voting(self, data, netTD, lap_scale):
         # TODO: Validate the logic against original article
         ## Collect the teachers' (netTD) votes (1/0) for each record in data and store them in results
-        results = zeros([len(netTD), data.shape[0]], dtype=int64)
+        results = zeros([len(netTD), self.batch_size], dtype=int64)
         # print(results)
         for i in range(len(netTD)):
             output = netTD[i](data, training=True)
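
Not shown in this hunk, but a routine like _pate_voting typically feeds a noisy-aggregation step: the per-record teacher votes are counted and Laplace noise with scale 1/lap_scale is added before the majority label is taken. A generic sketch of that aggregation, assuming binary teacher votes; this is the standard PATE mechanism, not necessarily the repository's exact implementation:

    import numpy as np

    def noisy_vote_aggregation(votes, lap_scale, rng=None):
        # votes: (n_teachers, n_records) array of 0/1 teacher decisions.
        rng = rng or np.random.default_rng()
        ones = votes.sum(axis=0).astype(np.float64)           # votes for class 1, per record
        zeros_ = votes.shape[0] - ones                        # votes for class 0, per record
        noisy_ones = ones + rng.laplace(scale=1.0 / lap_scale, size=ones.shape)
        noisy_zeros = zeros_ + rng.laplace(scale=1.0 / lap_scale, size=ones.shape)
        return (noisy_ones > noisy_zeros).astype(np.int64)    # noisy majority label per record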
