Update black #6

Merged · 2 commits · May 1, 2022
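
This PR bumps Black in CI from 19.10b0 to 22.3.0 and re-runs the formatter over the repository. Nearly all of the diff comes from one Black 22.x style change: the power operator is hugged (no surrounding spaces) when both operands are simple, while spaces are kept when an operand is complex, e.g. parenthesized. The only other change is in `models/nn.py`, where a trailing comma inside a one-line `dict(...)` call is dropped. A minimal sketch of the `**` rule (illustrative code, not from this repo):

```python
# Black 22.x hugs ** between simple operands (names, literals, attributes):
def unscale(x):
    return 10**x - 1  # Black 19.10b0 formatted this as: 10 ** x - 1


# ...but keeps the spaces when an operand is complex (e.g. parenthesized):
def quadrature(a, b):
    return (a**2 + b**2) ** 0.5
```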
2 changes: 1 addition & 1 deletion .github/workflows/push.yml
@@ -19,4 +19,4 @@ jobs:
       - uses: actions/checkout@v2
       - uses: psf/black@stable
         with:
-          version: '19.10b0'
+          version: '22.3.0'
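
Pinning `version` keeps the `psf/black@stable` action in step with what developers run locally. As a quick sanity check that 22.3.0 produces exactly the reformatting below, Black's Python API can round-trip the old spelling (a sketch, assuming `pip install black==22.3.0`; `format_str` and `Mode` are Black's documented programmatic entry points):

```python
import black

# The pre-PR spelling of the repo's `unscale` body...
old = "x = 10 ** x - 1\n"

# ...comes back in the hugged form under Black 22.x defaults.
new = black.format_str(old, mode=black.Mode())
assert new == "x = 10**x - 1\n"
```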
2 changes: 1 addition & 1 deletion dump_graph_model_v4.py
@@ -53,7 +53,7 @@ def preprocess(x):
         return tf.concat([preprocess_features(x), latent_input], axis=-1)

     def postprocess(x):
-        x = 10 ** x - 1
+        x = 10**x - 1
         return tf.where(x < 1.0, 0.0, x)

     dump_graph.model_to_graph(
2 changes: 1 addition & 1 deletion legacy_code/models/baseline_fc_v4_8x16.py
@@ -199,7 +199,7 @@ def gradient_penalty_on_data(self, features, real):
         # if self.cramer:
         #     d_real = tf.norm(d_real, axis=-1)
         grads = tf.reshape(t.gradient(d_real, real), [len(real), -1])
-        return tf.reduce_mean(tf.reduce_sum(grads ** 2, axis=-1))
+        return tf.reduce_mean(tf.reduce_sum(grads**2, axis=-1))

     @tf.function
     def calculate_losses(self, feature_batch, target_batch):
2 changes: 1 addition & 1 deletion legacy_code/models/baseline_v2_10x10.py
@@ -189,7 +189,7 @@ def gradient_penalty_on_data(self, features, real):
         # if self.cramer:
         #     d_real = tf.norm(d_real, axis=-1)
         grads = tf.reshape(t.gradient(d_real, real), [len(real), -1])
-        return tf.reduce_mean(tf.reduce_sum(grads ** 2, axis=-1))
+        return tf.reduce_mean(tf.reduce_sum(grads**2, axis=-1))

     @tf.function
     def calculate_losses(self, feature_batch, target_batch):
2 changes: 1 addition & 1 deletion legacy_code/models/baseline_v3_6x15.py
@@ -212,7 +212,7 @@ def gradient_penalty_on_data(self, features, real):
         # if self.cramer:
         #     d_real = tf.norm(d_real, axis=-1)
         grads = tf.reshape(t.gradient(d_real, real), [len(real), -1])
-        return tf.reduce_mean(tf.reduce_sum(grads ** 2, axis=-1))
+        return tf.reduce_mean(tf.reduce_sum(grads**2, axis=-1))

     @tf.function
     def calculate_losses(self, feature_batch, target_batch):
2 changes: 1 addition & 1 deletion legacy_code/models/baseline_v4_8x16.py
@@ -221,7 +221,7 @@ def gradient_penalty_on_data(self, features, real):
         # if self.cramer:
         #     d_real = tf.norm(d_real, axis=-1)
         grads = tf.reshape(t.gradient(d_real, real), [len(real), -1])
-        return tf.reduce_mean(tf.reduce_sum(grads ** 2, axis=-1))
+        return tf.reduce_mean(tf.reduce_sum(grads**2, axis=-1))

     @tf.function
     def calculate_losses(self, feature_batch, target_batch):
2 changes: 1 addition & 1 deletion legacy_code/test_script_data_v0.py
@@ -27,7 +27,7 @@


 def unscale(x):
-    return 10 ** x - 1
+    return 10**x - 1


 def write_hist_summary(step):
2 changes: 1 addition & 1 deletion legacy_code/test_script_data_v1.py
@@ -58,7 +58,7 @@ def save_model(step):
 writer_val = tf.summary.create_file_writer(f'logs/{args.checkpoint_name}/validation')

 def unscale(x):
-    return 10 ** x - 1
+    return 10**x - 1

 def write_hist_summary(step):
     if step % args.save_every == 0:
2 changes: 1 addition & 1 deletion legacy_code/test_script_data_v1_normed.py
@@ -31,7 +31,7 @@


 def unscale(x):
-    return 10 ** x - 1
+    return 10**x - 1


 def write_hist_summary(step):
2 changes: 1 addition & 1 deletion legacy_code/test_script_data_v2.py
@@ -138,7 +138,7 @@ def save_model(step):
 writer_val = tf.summary.create_file_writer(f'logs/{args.checkpoint_name}/validation')

 def unscale(x):
-    return 10 ** x - 1
+    return 10**x - 1

 def get_images(return_raw_data=False, calc_chi2=False, gen_more=None, sample=(X_test, Y_test), batch_size=128):
     X, Y = sample
2 changes: 1 addition & 1 deletion legacy_code/test_script_data_v3.py
@@ -134,7 +134,7 @@ def save_model(step):
 writer_val = tf.summary.create_file_writer(f'logs/{args.checkpoint_name}/validation')

 def unscale(x):
-    return 10 ** x - 1
+    return 10**x - 1

 def get_images(return_raw_data=False, calc_chi2=False, gen_more=None, sample=(X_test, Y_test), batch_size=128):
     X, Y = sample
6 changes: 3 additions & 3 deletions metrics/trends.py
@@ -31,7 +31,7 @@ def stats(arr):
     ).T

     if do_plot:
-        mean_p_std_err = (mean_err ** 2 + std_err ** 2) ** 0.5
+        mean_p_std_err = (mean_err**2 + std_err**2) ** 0.5
         plt.fill_between(bin_centers, mean - mean_err, mean + mean_err, **kwargs)
         kwargs['alpha'] *= 0.5
         kwargs = {k: v for k, v in kwargs.items() if k != 'label'}
@@ -78,11 +78,11 @@ def make_trend_plot(feature_real, real, feature_gen, gen, name, calc_chi2=False,

     gen_upper = gen_mean + gen_std
     gen_lower = gen_mean - gen_std
-    gen_err2 = gen_mean_err ** 2 + gen_std_err ** 2
+    gen_err2 = gen_mean_err**2 + gen_std_err**2

     real_upper = real_mean + real_std
     real_lower = real_mean - real_std
-    real_err2 = real_mean_err ** 2 + real_std_err ** 2
+    real_err2 = real_mean_err**2 + real_std_err**2

     chi2 = ((gen_upper - real_upper) ** 2 / (gen_err2 + real_err2)).sum() + (
         (gen_lower - real_lower) ** 2 / (gen_err2 + real_err2)
2 changes: 1 addition & 1 deletion models/callbacks.py
@@ -56,6 +56,6 @@ def get_scheduler(lr, lr_decay):
         return eval(lr_decay)

     def schedule_lr(step):
-        return lr * lr_decay ** step
+        return lr * lr_decay**step

     return schedule_lr
2 changes: 1 addition & 1 deletion models/model_v4.py
@@ -145,7 +145,7 @@ def gradient_penalty_on_data(self, features, real):
         d_real = self.discriminator([_f(features), real])

         grads = tf.reshape(t.gradient(d_real, real), [len(real), -1])
-        return tf.reduce_mean(tf.reduce_sum(grads ** 2, axis=-1))
+        return tf.reduce_mean(tf.reduce_sum(grads**2, axis=-1))

     @tf.function
     def calculate_losses(self, feature_batch, target_batch):
2 changes: 1 addition & 1 deletion models/nn.py
@@ -170,7 +170,7 @@ def vector_img_connect_block(vector_shape, img_shape, block, vector_bypass=False
     if concat_outputs:
         outputs = tf.keras.layers.Concatenate(axis=-1)(outputs)

-    args = dict(inputs=[input_vec, input_img], outputs=outputs,)
+    args = dict(inputs=[input_vec, input_img], outputs=outputs)

     if name:
         args['name'] = name
2 changes: 1 addition & 1 deletion models/scalers.py
@@ -16,7 +16,7 @@ def scale(self, x):
         return np.log10(1 + x)

     def unscale(self, x):
-        return 10 ** x - 1
+        return 10**x - 1


 class Gaussian: